[dpdk.git] / drivers/crypto/qat/qat_sym_session.c
index 1d58220..ed4d001 100644
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
  */
 
 #include <openssl/sha.h>       /* Needed to calculate pre-compute values */
 #include <rte_log.h>
 #include <rte_malloc.h>
 #include <rte_crypto_sym.h>
+#ifdef RTE_LIBRTE_SECURITY
+#include <rte_security.h>
+#endif
 
 #include "qat_logs.h"
 #include "qat_sym_session.h"
 #include "qat_sym_pmd.h"
 
+/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha1InitialState[] = {
+       0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
+       0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};
+
+/* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha224InitialState[] = {
+       0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
+       0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
+       0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};
+
+/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha256InitialState[] = {
+       0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
+       0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
+       0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};
+
+/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha384InitialState[] = {
+       0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
+       0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
+       0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
+       0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
+       0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
+       0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};
+
+/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha512InitialState[] = {
+       0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
+       0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
+       0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
+       0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
+       0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
+       0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
+
 /** Frees a context previously created
  *  Depends on openssl libcrypto
  */
@@ -35,7 +73,7 @@ bpi_cipher_ctx_free(void *bpi_ctx)
 static int
 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
                enum rte_crypto_cipher_operation direction __rte_unused,
-               uint8_t *key, void **ctx)
+               const uint8_t *key, uint16_t key_length, void **ctx)
 {
        const EVP_CIPHER *algo = NULL;
        int ret;
@@ -49,7 +87,10 @@ bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
        if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
                algo = EVP_des_ecb();
        else
-               algo = EVP_aes_128_ecb();
+               if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
+                       algo = EVP_aes_128_ecb();
+               else
+                       algo = EVP_aes_256_ecb();
 
        /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
        if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
@@ -242,7 +283,8 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
                session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
                break;
        case RTE_CRYPTO_CIPHER_NULL:
-               session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+               session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
+               session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
                break;
        case RTE_CRYPTO_CIPHER_KASUMI_F8:
                if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
@@ -285,6 +327,7 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
                                        cipher_xform->algo,
                                        cipher_xform->op,
                                        cipher_xform->key.data,
+                                       cipher_xform->key.length,
                                        &session->bpi_ctx);
                if (ret != 0) {
                        QAT_LOG(ERR, "failed to create DES BPI ctx");
@@ -303,6 +346,7 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
                                        cipher_xform->algo,
                                        cipher_xform->op,
                                        cipher_xform->key.data,
+                                       cipher_xform->key.length,
                                        &session->bpi_ctx);
                if (ret != 0) {
                        QAT_LOG(ERR, "failed to create AES BPI ctx");
@@ -333,10 +377,23 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
                }
                session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
                break;
+       case RTE_CRYPTO_CIPHER_AES_XTS:
+               if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
+                       QAT_LOG(ERR, "AES-XTS-192 not supported");
+                       ret = -EINVAL;
+                       goto error_out;
+               }
+               if (qat_sym_validate_aes_key((cipher_xform->key.length/2),
+                               &session->qat_cipher_alg) != 0) {
+                       QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
+                       ret = -EINVAL;
+                       goto error_out;
+               }
+               session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
+               break;
        case RTE_CRYPTO_CIPHER_3DES_ECB:
        case RTE_CRYPTO_CIPHER_AES_ECB:
        case RTE_CRYPTO_CIPHER_AES_F8:
-       case RTE_CRYPTO_CIPHER_AES_XTS:
        case RTE_CRYPTO_CIPHER_ARC4:
                QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
                                cipher_xform->algo);
@@ -402,6 +459,79 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
        return 0;
 }
 
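+/*
+ * Set the Use Extended Protocol Flags bit and the proto flags in LW 1 and
+ * the hash flags in LW 28; used for mixed wireless cipher/hash sessions.
+ */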
+static void
+qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
+               uint8_t hash_flag)
+{
+       struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+       struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+                       (struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+                       session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+       /* Set the Use Extended Protocol Flags bit in LW 1 */
+       QAT_FIELD_SET(header->comn_req_flags,
+                       QAT_COMN_EXT_FLAGS_USED,
+                       QAT_COMN_EXT_FLAGS_BITPOS,
+                       QAT_COMN_EXT_FLAGS_MASK);
+
+       /* Set Hash Flags in LW 28 */
+       cd_ctrl->hash_flags |= hash_flag;
+
+       /* Set proto flags in LW 1 */
+       switch (session->qat_cipher_alg) {
+       case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+               ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+                               ICP_QAT_FW_LA_SNOW_3G_PROTO);
+               ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+                               header->serv_specif_flags, 0);
+               break;
+       case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+               ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+                               ICP_QAT_FW_LA_NO_PROTO);
+               ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+                               header->serv_specif_flags,
+                               ICP_QAT_FW_LA_ZUC_3G_PROTO);
+               break;
+       default:
+               ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+                               ICP_QAT_FW_LA_NO_PROTO);
+               ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+                               header->serv_specif_flags, 0);
+               break;
+       }
+}
+
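+/*
+ * Detect mixed hash+cipher combinations involving SNOW3G UIA2/UEA2 or
+ * ZUC EIA3/EEA3, raise the minimum device generation (GEN2 when the
+ * mixed-crypto capability is present, otherwise GEN3) and set the
+ * extended hash flags accordingly.
+ */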
+static void
+qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
+               struct qat_sym_session *session)
+{
+       const struct qat_sym_dev_private *qat_private = dev->data->dev_private;
+       enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
+                       QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
+
+       if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+                       session->qat_cipher_alg !=
+                       ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+               session->min_qat_dev_gen = min_dev_gen;
+               qat_sym_session_set_ext_hash_flags(session,
+                       1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+       } else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+                       session->qat_cipher_alg !=
+                       ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+               session->min_qat_dev_gen = min_dev_gen;
+               qat_sym_session_set_ext_hash_flags(session,
+                       1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+       } else if ((session->aes_cmac ||
+                       session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+                       (session->qat_cipher_alg ==
+                       ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+                       session->qat_cipher_alg ==
+                       ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+               session->min_qat_dev_gen = min_dev_gen;
+               qat_sym_session_set_ext_hash_flags(session, 0);
+       }
+}
+
 int
 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
                struct rte_crypto_sym_xform *xform, void *session_private)
@@ -410,8 +540,16 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
        int ret;
        int qat_cmd_id;
 
+       /* Verify the session physical address is known */
+       rte_iova_t session_paddr = rte_mempool_virt2iova(session);
+       if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
+               QAT_LOG(ERR,
+                       "Session physical address unknown. Bad memory pool.");
+               return -EINVAL;
+       }
+
        /* Set context descriptor physical address */
-       session->cd_paddr = rte_mempool_virt2iova(session) +
+       session->cd_paddr = session_paddr +
                        offsetof(struct qat_sym_session, cd);
 
        session->min_qat_dev_gen = QAT_GEN1;
@@ -436,7 +574,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
                break;
        case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
                if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-                       ret = qat_sym_session_configure_aead(xform,
+                       ret = qat_sym_session_configure_aead(dev, xform,
                                        session);
                        if (ret < 0)
                                return ret;
@@ -449,11 +587,13 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
                                        xform, session);
                        if (ret < 0)
                                return ret;
+                       /* Special handling of mixed hash+cipher algorithms */
+                       qat_sym_session_handle_mixed(dev, session);
                }
                break;
        case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
                if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-                       ret = qat_sym_session_configure_aead(xform,
+                       ret = qat_sym_session_configure_aead(dev, xform,
                                        session);
                        if (ret < 0)
                                return ret;
@@ -466,6 +606,8 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
                                        xform, session);
                        if (ret < 0)
                                return ret;
+                       /* Special handling of mixed hash+cipher algorithms */
+                       qat_sym_session_handle_mixed(dev, session);
                }
                break;
        case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -489,6 +631,72 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
        return 0;
 }
 
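+/*
+ * Set up a single-pass (SPC) AEAD session: cipher and authentication run
+ * as one CIPHER command on QAT GEN3 or later. Reached for AES-GCM with
+ * the single-pass IV length and for Chacha20-Poly1305.
+ */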
+static int
+qat_sym_session_handle_single_pass(struct qat_sym_session *session,
+               struct rte_crypto_aead_xform *aead_xform)
+{
+       struct icp_qat_fw_la_cipher_req_params *cipher_param =
+                       (void *) &session->fw_req.serv_specif_rqpars;
+
+       session->is_single_pass = 1;
+       session->min_qat_dev_gen = QAT_GEN3;
+       session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
+       if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+               session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
+               ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+                       session->fw_req.comn_hdr.serv_specif_flags,
+                       ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+       } else {
+               /* Chacha-Poly is a special case that uses QAT CTR mode */
+               session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+       }
+       session->cipher_iv.offset = aead_xform->iv.offset;
+       session->cipher_iv.length = aead_xform->iv.length;
+       if (qat_sym_session_aead_create_cd_cipher(session,
+                       aead_xform->key.data, aead_xform->key.length))
+               return -EINVAL;
+       session->aad_len = aead_xform->aad_length;
+       session->digest_length = aead_xform->digest_length;
+       if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+               session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+               session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
+               ICP_QAT_FW_LA_RET_AUTH_SET(
+                       session->fw_req.comn_hdr.serv_specif_flags,
+                       ICP_QAT_FW_LA_RET_AUTH_RES);
+       } else {
+               session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+               session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
+               ICP_QAT_FW_LA_CMP_AUTH_SET(
+                       session->fw_req.comn_hdr.serv_specif_flags,
+                       ICP_QAT_FW_LA_CMP_AUTH_RES);
+       }
+       ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+                       session->fw_req.comn_hdr.serv_specif_flags,
+                       ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+       ICP_QAT_FW_LA_PROTO_SET(
+                       session->fw_req.comn_hdr.serv_specif_flags,
+                       ICP_QAT_FW_LA_NO_PROTO);
+       session->fw_req.comn_hdr.service_cmd_id =
+                       ICP_QAT_FW_LA_CMD_CIPHER;
+       session->cd.cipher.cipher_config.val =
+                       ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+                               ICP_QAT_HW_CIPHER_AEAD_MODE,
+                               session->qat_cipher_alg,
+                               ICP_QAT_HW_CIPHER_NO_CONVERT,
+                               session->qat_dir);
+       QAT_FIELD_SET(session->cd.cipher.cipher_config.val,
+                       aead_xform->digest_length,
+                       QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+                       QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+       session->cd.cipher.cipher_config.reserved =
+                       ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
+                               aead_xform->aad_length);
+       cipher_param->spc_aad_sz = aead_xform->aad_length;
+       cipher_param->spc_auth_res_sz = aead_xform->digest_length;
+
+       return 0;
+}
+
 int
 qat_sym_session_configure_auth(struct rte_cryptodev *dev,
                                struct rte_crypto_sym_xform *xform,
@@ -496,10 +704,35 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 {
        struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
        struct qat_sym_dev_private *internals = dev->data->dev_private;
-       uint8_t *key_data = auth_xform->key.data;
+       const uint8_t *key_data = auth_xform->key.data;
        uint8_t key_length = auth_xform->key.length;
+       session->aes_cmac = 0;
+
+       session->auth_iv.offset = auth_xform->iv.offset;
+       session->auth_iv.length = auth_xform->iv.length;
+       session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
 
        switch (auth_xform->algo) {
+       case RTE_CRYPTO_AUTH_SHA1:
+               session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
+               session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
+               break;
+       case RTE_CRYPTO_AUTH_SHA224:
+               session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
+               session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
+               break;
+       case RTE_CRYPTO_AUTH_SHA256:
+               session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
+               session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
+               break;
+       case RTE_CRYPTO_AUTH_SHA384:
+               session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
+               session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
+               break;
+       case RTE_CRYPTO_AUTH_SHA512:
+               session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
+               session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
+               break;
        case RTE_CRYPTO_AUTH_SHA1_HMAC:
                session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
                break;
@@ -518,6 +751,10 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
        case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
                session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
                break;
+       case RTE_CRYPTO_AUTH_AES_CMAC:
+               session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
+               session->aes_cmac = 1;
+               break;
        case RTE_CRYPTO_AUTH_AES_GMAC:
                if (qat_sym_validate_aes_key(auth_xform->key.length,
                                &session->qat_cipher_alg) != 0) {
@@ -526,6 +763,8 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
                }
                session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
                session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+               if (session->auth_iv.length == 0)
+                       session->auth_iv.length = AES_GCM_J0_LEN;
 
                break;
        case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
@@ -549,13 +788,7 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
                }
                session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
                break;
-       case RTE_CRYPTO_AUTH_SHA1:
-       case RTE_CRYPTO_AUTH_SHA256:
-       case RTE_CRYPTO_AUTH_SHA512:
-       case RTE_CRYPTO_AUTH_SHA224:
-       case RTE_CRYPTO_AUTH_SHA384:
        case RTE_CRYPTO_AUTH_MD5:
-       case RTE_CRYPTO_AUTH_AES_CMAC:
        case RTE_CRYPTO_AUTH_AES_CBC_MAC:
                QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
                                auth_xform->algo);
@@ -566,9 +799,6 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
                return -EINVAL;
        }
 
-       session->auth_iv.offset = auth_xform->iv.offset;
-       session->auth_iv.length = auth_xform->iv.length;
-
        if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
                if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
                        session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
@@ -628,11 +858,16 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 }
 
 int
-qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+qat_sym_session_configure_aead(struct rte_cryptodev *dev,
+                               struct rte_crypto_sym_xform *xform,
                                struct qat_sym_session *session)
 {
        struct rte_crypto_aead_xform *aead_xform = &xform->aead;
        enum rte_crypto_auth_operation crypto_operation;
+       struct qat_sym_dev_private *internals =
+                       dev->data->dev_private;
+       enum qat_device_gen qat_dev_gen =
+                       internals->qat_dev->qat_dev_gen;
 
        /*
         * Store AEAD IV parameters as cipher IV,
@@ -641,6 +876,9 @@ qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
        session->cipher_iv.offset = xform->aead.iv.offset;
        session->cipher_iv.length = xform->aead.iv.length;
 
+       session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
+
+       session->is_single_pass = 0;
        switch (aead_xform->algo) {
        case RTE_CRYPTO_AEAD_AES_GCM:
                if (qat_sym_validate_aes_key(aead_xform->key.length,
@@ -650,6 +888,14 @@ qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
                }
                session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
                session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+               if (qat_dev_gen > QAT_GEN2 && aead_xform->iv.length ==
+                               QAT_AES_GCM_SPC_IV_SIZE) {
+                       return qat_sym_session_handle_single_pass(session,
+                                       aead_xform);
+               }
+               if (session->cipher_iv.length == 0)
+                       session->cipher_iv.length = AES_GCM_J0_LEN;
+
                break;
        case RTE_CRYPTO_AEAD_AES_CCM:
                if (qat_sym_validate_aes_key(aead_xform->key.length,
@@ -660,6 +906,13 @@ qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
                session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
                session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
                break;
+       case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+               if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
+                       return -EINVAL;
+               session->qat_cipher_alg =
+                               ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
+               return qat_sym_session_handle_single_pass(session,
+                                               aead_xform);
        default:
                QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
                                aead_xform->algo);
@@ -817,6 +1070,8 @@ static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
                return ICP_QAT_HW_SHA512_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_MD5:
                return ICP_QAT_HW_MD5_STATE1_SZ;
+       case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+               return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
                /* return maximum digest size in this case */
                return ICP_QAT_HW_SHA512_STATE1_SZ;
@@ -843,6 +1098,8 @@ static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
                return SHA512_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
                return 16;
+       case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+               return ICP_QAT_HW_AES_BLK_SZ;
        case ICP_QAT_HW_AUTH_ALGO_MD5:
                return MD5_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
@@ -991,11 +1248,28 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
 #define HMAC_OPAD_VALUE        0x5c
 #define HASH_XCBC_PRECOMP_KEY_NUM 3
 
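+/* All-zero block: AES-encrypting it with the user key gives the value
+ * from which the CMAC sub-keys K1 and K2 are derived (RFC 4493).
+ */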
+static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
+
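+/* CMAC sub-key derivation (RFC 4493): left-shift the 16-byte block by one
+ * bit and, if the MSB of the input block was set, XOR the last byte with Rb.
+ */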
+static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
+{
+       int i;
+
+       derived[0] = base[0] << 1;
+       for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
+               derived[i] = base[i] << 1;
+               derived[i - 1] |= base[i] >> 7;
+       }
+
+       if (base[0] & 0x80)
+               derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
+}
+
 static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
                                const uint8_t *auth_key,
                                uint16_t auth_keylen,
                                uint8_t *p_state_buf,
-                               uint16_t *p_state_len)
+                               uint16_t *p_state_len,
+                               uint8_t aes_cmac)
 {
        int block_size;
        uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
@@ -1003,47 +1277,91 @@ static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
        int i;
 
        if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
-               static uint8_t qat_aes_xcbc_key_seed[
-                                       ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
-                       0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
-                       0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
-                       0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
-                       0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
-                       0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
-                       0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
-               };
 
-               uint8_t *in = NULL;
-               uint8_t *out = p_state_buf;
-               int x;
-               AES_KEY enc_key;
+               /* CMAC */
+               if (aes_cmac) {
+                       AES_KEY enc_key;
+                       uint8_t *in = NULL;
+                       uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
+                       uint8_t *k1, *k2;
 
-               in = rte_zmalloc("working mem for key",
-                               ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
-               if (in == NULL) {
-                       QAT_LOG(ERR, "Failed to alloc memory");
-                       return -ENOMEM;
-               }
+                       auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
+
+                       in = rte_zmalloc("AES CMAC K1",
+                                        ICP_QAT_HW_AES_128_KEY_SZ, 16);
+
+                       if (in == NULL) {
+                               QAT_LOG(ERR, "Failed to alloc memory");
+                               return -ENOMEM;
+                       }
+
+                       rte_memcpy(in, AES_CMAC_SEED,
+                                  ICP_QAT_HW_AES_128_KEY_SZ);
+                       rte_memcpy(p_state_buf, auth_key, auth_keylen);
 
-               rte_memcpy(in, qat_aes_xcbc_key_seed,
-                               ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
-               for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
                        if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
                                &enc_key) != 0) {
-                               rte_free(in -
-                                       (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
-                               memset(out -
-                                       (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
-                                       0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+                               rte_free(in);
                                return -EFAULT;
                        }
-                       AES_encrypt(in, out, &enc_key);
-                       in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
-                       out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+
+                       AES_encrypt(in, k0, &enc_key);
+
+                       k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+                       k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+
+                       aes_cmac_key_derive(k0, k1);
+                       aes_cmac_key_derive(k1, k2);
+
+                       memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
+                       *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+                       rte_free(in);
+                       return 0;
+               } else {
+                       static uint8_t qat_aes_xcbc_key_seed[
+                                       ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
+                               0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+                               0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+                               0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+                               0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+                               0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+                               0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+                       };
+
+                       uint8_t *in = NULL;
+                       uint8_t *out = p_state_buf;
+                       int x;
+                       AES_KEY enc_key;
+
+                       in = rte_zmalloc("working mem for key",
+                                       ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
+                       if (in == NULL) {
+                               QAT_LOG(ERR, "Failed to alloc memory");
+                               return -ENOMEM;
+                       }
+
+                       rte_memcpy(in, qat_aes_xcbc_key_seed,
+                                       ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+                       for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
+                               if (AES_set_encrypt_key(auth_key,
+                                                       auth_keylen << 3,
+                                                       &enc_key) != 0) {
+                                       rte_free(in -
+                                         (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
+                                       memset(out -
+                                          (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
+                                         0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+                                       return -EFAULT;
+                               }
+                               AES_encrypt(in, out, &enc_key);
+                               in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+                               out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+                       }
+                       *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+                       rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
+                       return 0;
                }
-               *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
-               rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
-               return 0;
+
        } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
                (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
                uint8_t *in = NULL;
@@ -1074,8 +1392,8 @@ static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
        }
 
        block_size = qat_hash_get_block_size(hash_alg);
-       if (block_size <= 0)
-               return -EFAULT;
+       if (block_size < 0)
+               return block_size;
        /* init ipad and opad from key and xor with fixed values */
        memset(ipad, 0, block_size);
        memset(opad, 0, block_size);
@@ -1189,7 +1507,7 @@ qat_get_crypto_proto_flag(uint16_t flags)
 }
 
 int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
-                                               uint8_t *cipherkey,
+                                               const uint8_t *cipherkey,
                                                uint32_t cipherkeylen)
 {
        struct icp_qat_hw_cipher_algo_blk *cipher;
@@ -1344,7 +1662,7 @@ int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
 }
 
 int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
-                                               uint8_t *authkey,
+                                               const uint8_t *authkey,
                                                uint32_t authkeylen,
                                                uint32_t aad_length,
                                                uint32_t digestsize,
@@ -1361,8 +1679,8 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (struct icp_qat_fw_la_auth_req_params *)
                ((char *)&req_tmpl->serv_specif_rqpars +
-               sizeof(struct icp_qat_fw_la_cipher_req_params));
-       uint16_t state1_size = 0, state2_size = 0;
+               ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+       uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
        uint16_t hash_offset, cd_size;
        uint32_t *aad_len = NULL;
        uint32_t wordIndex  = 0;
@@ -1412,16 +1730,25 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
        hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
        hash->auth_config.reserved = 0;
        hash->auth_config.config =
-                       ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+                       ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
                                cdesc->qat_hash_alg, digestsize);
 
-       if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
+       if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
+               || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
                || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
-               || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
+               || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
+               || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
+               || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
+               || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
+                       )
                hash->auth_counter.counter = 0;
-       else
-               hash->auth_counter.counter = rte_bswap32(
-                               qat_hash_get_block_size(cdesc->qat_hash_alg));
+       else {
+               int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);
+
+               if (block_size < 0)
+                       return block_size;
+               hash->auth_counter.counter = rte_bswap32(block_size);
+       }
 
        cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
 
@@ -1430,40 +1757,90 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
         */
        switch (cdesc->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
-               if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
-                       authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+               if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
+                       /* Plain SHA-1 */
+                       rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
+                                       sizeof(sha1InitialState));
+                       state1_size = qat_hash_get_state1_size(
+                                       cdesc->qat_hash_alg);
+                       break;
+               }
+               /* SHA-1 HMAC */
+               if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
+                       authkeylen, cdesc->cd_cur_ptr, &state1_size,
+                       cdesc->aes_cmac)) {
                        QAT_LOG(ERR, "(SHA)precompute failed");
                        return -EFAULT;
                }
                state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA224:
-               if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
-                       authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+               if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
+                       /* Plain SHA-224 */
+                       rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
+                                       sizeof(sha224InitialState));
+                       state1_size = qat_hash_get_state1_size(
+                                       cdesc->qat_hash_alg);
+                       break;
+               }
+               /* SHA-224 HMAC */
+               if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
+                       authkeylen, cdesc->cd_cur_ptr, &state1_size,
+                       cdesc->aes_cmac)) {
                        QAT_LOG(ERR, "(SHA)precompute failed");
                        return -EFAULT;
                }
                state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
-               if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
-                       authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+               if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
+                       /* Plain SHA-256 */
+                       rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
+                                       sizeof(sha256InitialState));
+                       state1_size = qat_hash_get_state1_size(
+                                       cdesc->qat_hash_alg);
+                       break;
+               }
+               /* SHA-256 HMAC */
+               if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
+                       authkeylen, cdesc->cd_cur_ptr,  &state1_size,
+                       cdesc->aes_cmac)) {
                        QAT_LOG(ERR, "(SHA)precompute failed");
                        return -EFAULT;
                }
                state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA384:
-               if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
-                       authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+               if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
+                       /* Plain SHA-384 */
+                       rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
+                                       sizeof(sha384InitialState));
+                       state1_size = qat_hash_get_state1_size(
+                                       cdesc->qat_hash_alg);
+                       break;
+               }
+               /* SHA-384 HMAC */
+               if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
+                       authkeylen, cdesc->cd_cur_ptr, &state1_size,
+                       cdesc->aes_cmac)) {
                        QAT_LOG(ERR, "(SHA)precompute failed");
                        return -EFAULT;
                }
                state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
-               if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
-                       authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+               if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
+                       /* Plain SHA-512 */
+                       rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
+                                       sizeof(sha512InitialState));
+                       state1_size = qat_hash_get_state1_size(
+                                       cdesc->qat_hash_alg);
+                       break;
+               }
+               /* SHA-512 HMAC */
+               if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
+                       authkeylen, cdesc->cd_cur_ptr,  &state1_size,
+                       cdesc->aes_cmac)) {
                        QAT_LOG(ERR, "(SHA)precompute failed");
                        return -EFAULT;
                }
@@ -1471,10 +1848,16 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
                break;
        case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
                state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+
+               if (cdesc->aes_cmac)
+                       memset(cdesc->cd_cur_ptr, 0, state1_size);
                if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
                        authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
-                       &state2_size)) {
-                       QAT_LOG(ERR, "(XCBC)precompute failed");
+                       &state2_size, cdesc->aes_cmac)) {
+                       cdesc->aes_cmac ? QAT_LOG(ERR,
+                                                 "(CMAC)precompute failed")
+                                       : QAT_LOG(ERR,
+                                                 "(XCBC)precompute failed");
                        return -EFAULT;
                }
                break;
@@ -1482,9 +1865,9 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
                qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
                state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
-               if (qat_sym_do_precomputes(cdesc->qat_hash_alg,
-                       authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
-                       &state2_size)) {
+               if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
+                       authkeylen, cdesc->cd_cur_ptr + state1_size,
+                       &state2_size, cdesc->aes_cmac)) {
                        QAT_LOG(ERR, "(GCM)precompute failed");
                        return -EFAULT;
                }
@@ -1519,7 +1902,7 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
                memcpy(cipherconfig->key, authkey, authkeylen);
                memset(cipherconfig->key + authkeylen,
                                0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
-               cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+               cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
                                authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
                auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
                break;
@@ -1535,16 +1918,15 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
                        + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
 
                memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
-               cdesc->cd_cur_ptr += state1_size + state2_size
-                       + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
+               cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
                auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
                cdesc->min_qat_dev_gen = QAT_GEN2;
 
                break;
        case ICP_QAT_HW_AUTH_ALGO_MD5:
-               if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
-                       authkey, authkeylen, cdesc->cd_cur_ptr,
-                       &state1_size)) {
+               if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
+                       authkeylen, cdesc->cd_cur_ptr, &state1_size,
+                       cdesc->aes_cmac)) {
                        QAT_LOG(ERR, "(MD5)precompute failed");
                        return -EFAULT;
                }
@@ -1622,7 +2004,7 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
                         RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
                                        >> 3);
 
-       cdesc->cd_cur_ptr += state1_size + state2_size;
+       cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
        cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
 
        cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
@@ -1656,6 +2038,9 @@ int qat_sym_validate_aes_docsisbpi_key(int key_len,
        case ICP_QAT_HW_AES_128_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                break;
+       case ICP_QAT_HW_AES_256_KEY_SZ:
+               *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+               break;
        default:
                return -EINVAL;
        }
@@ -1723,3 +2108,151 @@ int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
        }
        return 0;
 }
+
+#ifdef RTE_LIBRTE_SECURITY
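+/*
+ * A DOCSIS security session must hold exactly one AES-DOCSISBPI cipher
+ * xform with a 128- or 256-bit key and a block-sized IV: encrypt for the
+ * downlink direction, decrypt for the uplink direction.
+ */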
+static int
+qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
+{
+       struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
+       struct rte_security_docsis_xform *docsis = &conf->docsis;
+
+       /* CRC generate -> Cipher encrypt */
+       if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
+
+               if (crypto_sym != NULL &&
+                   crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+                   crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+                   crypto_sym->cipher.algo ==
+                                       RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
+                   (crypto_sym->cipher.key.length ==
+                                       ICP_QAT_HW_AES_128_KEY_SZ ||
+                    crypto_sym->cipher.key.length ==
+                                       ICP_QAT_HW_AES_256_KEY_SZ) &&
+                   crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
+                   crypto_sym->next == NULL) {
+                       return 0;
+               }
+       /* Cipher decrypt -> CRC verify */
+       } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
+
+               if (crypto_sym != NULL &&
+                   crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+                   crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+                   crypto_sym->cipher.algo ==
+                                       RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
+                   (crypto_sym->cipher.key.length ==
+                                       ICP_QAT_HW_AES_128_KEY_SZ ||
+                    crypto_sym->cipher.key.length ==
+                                       ICP_QAT_HW_AES_256_KEY_SZ) &&
+                   crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
+                   crypto_sym->next == NULL) {
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
+}
+
+static int
+qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
+               struct rte_security_session_conf *conf, void *session_private)
+{
+       int ret;
+       int qat_cmd_id;
+       struct rte_crypto_sym_xform *xform = NULL;
+       struct qat_sym_session *session = session_private;
+
+       /* Clear the session */
+       memset(session, 0, qat_sym_session_get_private_size(dev));
+
+       ret = qat_sec_session_check_docsis(conf);
+       if (ret) {
+               QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
+               return ret;
+       }
+
+       xform = conf->crypto_xform;
+
+       /* Verify the session physical address is known */
+       rte_iova_t session_paddr = rte_mempool_virt2iova(session);
+       if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
+               QAT_LOG(ERR,
+                       "Session physical address unknown. Bad memory pool.");
+               return -EINVAL;
+       }
+
+       /* Set context descriptor physical address */
+       session->cd_paddr = session_paddr +
+                       offsetof(struct qat_sym_session, cd);
+
+       session->min_qat_dev_gen = QAT_GEN1;
+
+       /* Get requested QAT command id - should be cipher */
+       qat_cmd_id = qat_get_cmd_id(xform);
+       if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
+               QAT_LOG(ERR, "Unsupported xform chain requested");
+               return -ENOTSUP;
+       }
+       session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
+
+       ret = qat_sym_session_configure_cipher(dev, xform, session);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+int
+qat_security_session_create(void *dev,
+                               struct rte_security_session_conf *conf,
+                               struct rte_security_session *sess,
+                               struct rte_mempool *mempool)
+{
+       void *sess_private_data;
+       struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+       int ret;
+
+       if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
+                       conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
+               QAT_LOG(ERR, "Invalid security protocol");
+               return -EINVAL;
+       }
+
+       if (rte_mempool_get(mempool, &sess_private_data)) {
+               QAT_LOG(ERR, "Couldn't get object from session mempool");
+               return -ENOMEM;
+       }
+
+       ret = qat_sec_session_set_docsis_parameters(cdev, conf,
+                       sess_private_data);
+       if (ret != 0) {
+               QAT_LOG(ERR, "Failed to configure session parameters");
+               /* Return session to mempool */
+               rte_mempool_put(mempool, sess_private_data);
+               return ret;
+       }
+
+       set_sec_session_private_data(sess, sess_private_data);
+
+       return ret;
+}
+
+int
+qat_security_session_destroy(void *dev __rte_unused,
+                                struct rte_security_session *sess)
+{
+       void *sess_priv = get_sec_session_private_data(sess);
+       struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
+
+       if (sess_priv) {
+               if (s->bpi_ctx)
+                       bpi_cipher_ctx_free(s->bpi_ctx);
+               memset(s, 0, qat_sym_session_get_private_size(dev));
+               struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+               set_sec_session_private_data(sess, NULL);
+               rte_mempool_put(sess_mp, sess_priv);
+       }
+       return 0;
+}
+#endif