crypto/dpaa2_sec: add per-queue-pair FLE pool, ordered enqueue and error dump support
[dpdk.git] / drivers / crypto / dpaa2_sec / dpaa2_sec_dpseci.c
index 6459a02..8444f1a 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2019 NXP
+ *   Copyright 2016-2022 NXP
  *
  */
 
@@ -18,7 +18,7 @@
 #include <rte_cycles.h>
 #include <rte_kvargs.h>
 #include <rte_dev.h>
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
 #include <rte_common.h>
 #include <rte_fslmc.h>
 #include <fslmc_vfio.h>
@@ -28,6 +28,7 @@
 #include <fsl_dpopr.h>
 #include <fsl_dpseci.h>
 #include <fsl_mc_sys.h>
+#include <rte_hexdump.h>
 
 #include "dpaa2_sec_priv.h"
 #include "dpaa2_sec_event.h"
@@ -36,6 +37,7 @@
 /* RTA header files */
 #include <desc/ipsec.h>
 #include <desc/pdcp.h>
+#include <desc/sdap.h>
 #include <desc/algo.h>
 
 /* Minimum job descriptor consists of a oneword job descriptor HEADER and
 #define FSL_MC_DPSECI_DEVID     3
 
 #define NO_PREFETCH 0
-/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
-#define FLE_POOL_NUM_BUFS      32000
-#define FLE_POOL_BUF_SIZE      256
-#define FLE_POOL_CACHE_SIZE    512
-#define FLE_SG_MEM_SIZE(num)   (FLE_POOL_BUF_SIZE + ((num) * 32))
-#define SEC_FLC_DHR_OUTBOUND   -114
-#define SEC_FLC_DHR_INBOUND    0
 
-static uint8_t cryptodev_driver_id;
+#define DRIVER_DUMP_MODE "drv_dump_mode"
+#define DRIVER_STRICT_ORDER "drv_strict_order"
 
-int dpaa2_logtype_sec;
+/* DPAA2_SEC_DP_DUMP levels */
+enum dpaa2_sec_dump_levels {
+       DPAA2_SEC_DP_NO_DUMP,
+       DPAA2_SEC_DP_ERR_DUMP,
+       DPAA2_SEC_DP_FULL_DUMP
+};
 
-#ifdef RTE_LIBRTE_SECURITY
+uint8_t cryptodev_driver_id;
+uint8_t dpaa2_sec_dp_dump = DPAA2_SEC_DP_ERR_DUMP;
+
+static inline void
+free_fle(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp)
+{
+       struct qbman_fle *fle;
+       struct rte_crypto_op *op;
+
+#ifdef RTE_LIB_SECURITY
+       if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
+               return;
+#endif
+       fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+       op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
+       /* free the fle memory */
+       if (likely(rte_pktmbuf_is_contiguous(op->sym->m_src)))
+               rte_mempool_put(qp->fle_pool, (void *)(fle-1));
+       else
+               rte_free((void *)(fle-1));
+}
+
+#ifdef RTE_LIB_SECURITY
 static inline int
 build_proto_compound_sg_fd(dpaa2_sec_session *sess,
                           struct rte_crypto_op *op,
@@ -166,7 +189,8 @@ build_proto_compound_sg_fd(dpaa2_sec_session *sess,
         * mbuf priv after sym_op.
         */
        if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
-               uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
+               uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
+                                       sess->pdcp.hfn_ovd_offset);
                /*enable HFN override override */
                DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
                DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
@@ -180,7 +204,7 @@ build_proto_compound_sg_fd(dpaa2_sec_session *sess,
 static inline int
 build_proto_compound_fd(dpaa2_sec_session *sess,
               struct rte_crypto_op *op,
-              struct qbman_fd *fd, uint16_t bpid)
+              struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
 {
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct ctxt_priv *priv = sess->ctxt;
@@ -197,9 +221,9 @@ build_proto_compound_fd(dpaa2_sec_session *sess,
        flc = &priv->flc_desc[0].flc;
 
        /* we are using the first FLE entry to store Mbuf */
-       retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+       retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
        if (retval) {
-               DPAA2_SEC_DP_ERR("Memory alloc failed");
+               DPAA2_SEC_DP_DEBUG("Proto: Memory alloc failed");
                return -ENOMEM;
        }
        memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -241,7 +265,8 @@ build_proto_compound_fd(dpaa2_sec_session *sess,
         * mbuf priv after sym_op.
         */
        if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
-               uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
+               uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
+                                       sess->pdcp.hfn_ovd_offset);
                /*enable HFN override override */
                DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
                DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
@@ -255,11 +280,11 @@ build_proto_compound_fd(dpaa2_sec_session *sess,
 static inline int
 build_proto_fd(dpaa2_sec_session *sess,
               struct rte_crypto_op *op,
-              struct qbman_fd *fd, uint16_t bpid)
+              struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
 {
        struct rte_crypto_sym_op *sym_op = op->sym;
        if (sym_op->m_dst)
-               return build_proto_compound_fd(sess, op, fd, bpid);
+               return build_proto_compound_fd(sess, op, fd, bpid, qp);
 
        struct ctxt_priv *priv = sess->ctxt;
        struct sec_flow_context *flc;
@@ -434,7 +459,8 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
 static inline int
 build_authenc_gcm_fd(dpaa2_sec_session *sess,
                     struct rte_crypto_op *op,
-                    struct qbman_fd *fd, uint16_t bpid)
+                    struct qbman_fd *fd, uint16_t bpid,
+                    struct dpaa2_sec_qp *qp)
 {
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct ctxt_priv *priv = sess->ctxt;
@@ -458,9 +484,9 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
         * to get the MBUF Addr from the previous FLE.
         * We can have a better approach to use the inline Mbuf
         */
-       retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+       retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
        if (retval) {
-               DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
+               DPAA2_SEC_DP_DEBUG("GCM: no buffer available in fle pool");
                return -ENOMEM;
        }
        memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -721,7 +747,7 @@ build_authenc_sg_fd(dpaa2_sec_session *sess,
 static inline int
 build_authenc_fd(dpaa2_sec_session *sess,
                 struct rte_crypto_op *op,
-                struct qbman_fd *fd, uint16_t bpid)
+                struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
 {
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct ctxt_priv *priv = sess->ctxt;
@@ -750,9 +776,9 @@ build_authenc_fd(dpaa2_sec_session *sess,
         * to get the MBUF Addr from the previous FLE.
         * We can have a better approach to use the inline Mbuf
         */
-       retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+       retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
        if (retval) {
-               DPAA2_SEC_ERR("Memory alloc failed for SGE");
+               DPAA2_SEC_DP_DEBUG("AUTHENC: no buffer available in fle pool");
                return -ENOMEM;
        }
        memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -983,7 +1009,7 @@ static inline int build_auth_sg_fd(
 
 static inline int
 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
-             struct qbman_fd *fd, uint16_t bpid)
+             struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
 {
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct qbman_fle *fle, *sge;
@@ -1007,9 +1033,9 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
                data_offset = data_offset >> 3;
        }
 
-       retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+       retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
        if (retval) {
-               DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
+               DPAA2_SEC_DP_DEBUG("AUTH: no buffer available in fle pool");
                return -ENOMEM;
        }
        memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -1230,7 +1256,7 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 
 static int
 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
-               struct qbman_fd *fd, uint16_t bpid)
+               struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
 {
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct qbman_fle *fle, *sge;
@@ -1260,9 +1286,9 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
        else
                dst = sym_op->m_src;
 
-       retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+       retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
        if (retval) {
-               DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
+               DPAA2_SEC_DP_DEBUG("CIPHER: no buffer available in fle pool");
                return -ENOMEM;
        }
        memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -1347,7 +1373,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 
 static inline int
 build_sec_fd(struct rte_crypto_op *op,
-            struct qbman_fd *fd, uint16_t bpid)
+            struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
 {
        int ret = -1;
        dpaa2_sec_session *sess;
@@ -1355,16 +1381,20 @@ build_sec_fd(struct rte_crypto_op *op,
        if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
                sess = (dpaa2_sec_session *)get_sym_session_private_data(
                                op->sym->session, cryptodev_driver_id);
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
        else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
                sess = (dpaa2_sec_session *)get_sec_session_private_data(
                                op->sym->sec_session);
 #endif
-       else
+       else {
+               DPAA2_SEC_DP_ERR("Session type invalid\n");
                return -ENOTSUP;
+       }
 
-       if (!sess)
+       if (!sess) {
+               DPAA2_SEC_DP_ERR("Session not available\n");
                return -EINVAL;
+       }
 
        /* Any of the buffer is segmented*/
        if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
@@ -1383,7 +1413,7 @@ build_sec_fd(struct rte_crypto_op *op,
                case DPAA2_SEC_CIPHER_HASH:
                        ret = build_authenc_sg_fd(sess, op, fd, bpid);
                        break;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
                case DPAA2_SEC_IPSEC:
                case DPAA2_SEC_PDCP:
                        ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
@@ -1396,23 +1426,23 @@ build_sec_fd(struct rte_crypto_op *op,
        } else {
                switch (sess->ctxt_type) {
                case DPAA2_SEC_CIPHER:
-                       ret = build_cipher_fd(sess, op, fd, bpid);
+                       ret = build_cipher_fd(sess, op, fd, bpid, qp);
                        break;
                case DPAA2_SEC_AUTH:
-                       ret = build_auth_fd(sess, op, fd, bpid);
+                       ret = build_auth_fd(sess, op, fd, bpid, qp);
                        break;
                case DPAA2_SEC_AEAD:
-                       ret = build_authenc_gcm_fd(sess, op, fd, bpid);
+                       ret = build_authenc_gcm_fd(sess, op, fd, bpid, qp);
                        break;
                case DPAA2_SEC_CIPHER_HASH:
-                       ret = build_authenc_fd(sess, op, fd, bpid);
+                       ret = build_authenc_fd(sess, op, fd, bpid, qp);
                        break;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
                case DPAA2_SEC_IPSEC:
-                       ret = build_proto_fd(sess, op, fd, bpid);
+                       ret = build_proto_fd(sess, op, fd, bpid, qp);
                        break;
                case DPAA2_SEC_PDCP:
-                       ret = build_proto_compound_fd(sess, op, fd, bpid);
+                       ret = build_proto_compound_fd(sess, op, fd, bpid, qp);
                        break;
 #endif
                case DPAA2_SEC_HASH_CIPHER:
@@ -1471,23 +1501,24 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
                        dpaa2_eqcr_size : nb_ops;
 
                for (loop = 0; loop < frames_to_send; loop++) {
-                       if ((*ops)->sym->m_src->seqn) {
-                        uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
-
-                        flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
-                        DPAA2_PER_LCORE_DQRR_SIZE--;
-                        DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
-                        (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
+                       if (*dpaa2_seqn((*ops)->sym->m_src)) {
+                               if (*dpaa2_seqn((*ops)->sym->m_src) & QBMAN_ENQUEUE_FLAG_DCA) {
+                                       DPAA2_PER_LCORE_DQRR_SIZE--;
+                                       DPAA2_PER_LCORE_DQRR_HELD &= ~(1 <<
+                                       (*dpaa2_seqn((*ops)->sym->m_src) &
+                                       QBMAN_EQCR_DCA_IDXMASK));
+                               }
+                               flags[loop] = *dpaa2_seqn((*ops)->sym->m_src);
+                               *dpaa2_seqn((*ops)->sym->m_src) = DPAA2_INVALID_MBUF_SEQN;
                        }
 
                        /*Clear the unused FD fields before sending*/
                        memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
                        mb_pool = (*ops)->sym->m_src->pool;
                        bpid = mempool_to_bpid(mb_pool);
-                       ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
+                       ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp);
                        if (ret) {
-                               DPAA2_SEC_ERR("error: Improper packet contents"
-                                             " for crypto operation");
+                               DPAA2_SEC_DP_DEBUG("FD build failed\n");
                                goto skip_tx;
                        }
                        ops++;
@@ -1505,6 +1536,13 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
                                if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
                                        num_tx += loop;
                                        nb_ops -= loop;
+                                       DPAA2_SEC_DP_DEBUG("Enqueue fail\n");
+                                       /* freeing the fle buffers */
+                                       while (loop < frames_to_send) {
+                                               free_fle(&fd_arr[loop],
+                                                               dpaa2_qp);
+                                               loop++;
+                                       }
                                        goto skip_tx;
                                }
                        } else {
@@ -1522,7 +1560,7 @@ skip_tx:
        return num_tx;
 }
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 static inline struct rte_crypto_op *
 sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
 {
@@ -1531,6 +1569,10 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
        int16_t diff = 0;
        dpaa2_sec_session *sess_priv __rte_unused;
 
+       if (unlikely(DPAA2_GET_FD_IVP(fd))) {
+               DPAA2_SEC_ERR("error: non inline buffer");
+               return NULL;
+       }
        struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
                DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
@@ -1549,19 +1591,26 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
        else
                mbuf->data_off += SEC_FLC_DHR_INBOUND;
 
+       if (unlikely(fd->simple.frc)) {
+               DPAA2_SEC_ERR("SEC returned Error - %x",
+                               fd->simple.frc);
+               op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+       } else {
+               op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+       }
+
        return op;
 }
 #endif
 
 static inline struct rte_crypto_op *
-sec_fd_to_mbuf(const struct qbman_fd *fd)
+sec_fd_to_mbuf(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp)
 {
        struct qbman_fle *fle;
        struct rte_crypto_op *op;
-       struct ctxt_priv *priv;
        struct rte_mbuf *dst, *src;
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
        if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
                return sec_simple_fd_to_mbuf(fd);
 #endif
@@ -1577,11 +1626,6 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
         * We can have a better approach to use the inline Mbuf
         */
 
-       if (unlikely(DPAA2_GET_FD_IVP(fd))) {
-               /* TODO complete it. */
-               DPAA2_SEC_ERR("error: non inline buffer");
-               return NULL;
-       }
        op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
 
        /* Prefeth op */
@@ -1594,7 +1638,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
        } else
                dst = src;
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
        if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                uint16_t len = DPAA2_GET_FD_LEN(fd);
                dst->pkt_len = len;
@@ -1617,14 +1661,261 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
 
        /* free the fle memory */
        if (likely(rte_pktmbuf_is_contiguous(src))) {
-               priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
-               rte_mempool_put(priv->fle_pool, (void *)(fle-1));
+               rte_mempool_put(qp->fle_pool, (void *)(fle-1));
        } else
                rte_free((void *)(fle-1));
 
        return op;
 }
 
+static void
+dpaa2_sec_dump(struct rte_crypto_op *op)
+{
+       int i;
+       dpaa2_sec_session *sess = NULL;
+       struct ctxt_priv *priv;
+       uint8_t bufsize;
+       struct rte_crypto_sym_op *sym_op;
+
+       if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+               sess = (dpaa2_sec_session *)get_sym_session_private_data(
+                       op->sym->session, cryptodev_driver_id);
+#ifdef RTE_LIB_SECURITY
+       else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+               sess = (dpaa2_sec_session *)get_sec_session_private_data(
+                       op->sym->sec_session);
+#endif
+
+       if (sess == NULL)
+               goto mbuf_dump;
+
+       priv = (struct ctxt_priv *)sess->ctxt;
+       printf("\n****************************************\n"
+               "session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
+               "\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
+               "\tCipher key len:\t%zd\n", sess->ctxt_type,
+               (sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
+               sess->cipher_alg, sess->auth_alg, sess->aead_alg,
+               sess->cipher_key.length);
+               rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
+                               sess->cipher_key.length);
+               rte_hexdump(stdout, "auth key", sess->auth_key.data,
+                               sess->auth_key.length);
+       printf("\tAuth key len:\t%zd\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
+               "\tdigest length:\t%d\n\tstatus:\t\t%d\n\taead auth only"
+               " len:\t%d\n\taead cipher text:\t%d\n",
+               sess->auth_key.length, sess->iv.length, sess->iv.offset,
+               sess->digest_length, sess->status,
+               sess->ext_params.aead_ctxt.auth_only_len,
+               sess->ext_params.aead_ctxt.auth_cipher_text);
+#ifdef RTE_LIB_SECURITY
+       printf("PDCP session params:\n"
+               "\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
+               "\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n"
+               "\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
+               sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
+               sess->pdcp.sn_size, sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
+               sess->pdcp.hfn_threshold);
+
+#endif
+       bufsize = (uint8_t)priv->flc_desc[0].flc.word1_sdl;
+       printf("Descriptor Dump:\n");
+       for (i = 0; i < bufsize; i++)
+               printf("\tDESC[%d]:0x%x\n", i, priv->flc_desc[0].desc[i]);
+
+       printf("\n");
+mbuf_dump:
+       sym_op = op->sym;
+       if (sym_op->m_src) {
+               printf("Source mbuf:\n");
+               rte_pktmbuf_dump(stdout, sym_op->m_src, sym_op->m_src->data_len);
+       }
+       if (sym_op->m_dst) {
+               printf("Destination mbuf:\n");
+               rte_pktmbuf_dump(stdout, sym_op->m_dst, sym_op->m_dst->data_len);
+       }
+
+       printf("Session address = %p\ncipher offset: %d, length: %d\n"
+               "auth offset: %d, length:  %d\n aead offset: %d, length: %d\n"
+               , sym_op->session,
+               sym_op->cipher.data.offset, sym_op->cipher.data.length,
+               sym_op->auth.data.offset, sym_op->auth.data.length,
+               sym_op->aead.data.offset, sym_op->aead.data.length);
+       printf("\n");
+
+}
+
+static void
+dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci,
+                         struct dpaa2_queue *dpaa2_q)
+{
+       struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
+       struct rte_crypto_op *op;
+       struct qbman_fd *fd;
+       struct dpaa2_sec_qp *dpaa2_qp;
+
+       dpaa2_qp = container_of(dpaa2_q, struct dpaa2_sec_qp, tx_vq);
+       fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
+       op = sec_fd_to_mbuf(fd, dpaa2_qp);
+       /* Instead of freeing, enqueue it to the sec tx queue (sec->core)
+        * after setting an error in FD. But this will have performance impact.
+        */
+       rte_pktmbuf_free(op->sym->m_src);
+}
+
+static void
+dpaa2_sec_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
+                            struct rte_mbuf *m,
+                            struct qbman_eq_desc *eqdesc)
+{
+       struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
+       struct eqresp_metadata *eqresp_meta;
+       struct dpaa2_sec_dev_private *priv = dpaa2_q->crypto_data->dev_private;
+       uint16_t orpid, seqnum;
+       uint8_t dq_idx;
+
+       if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
+               orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
+                       DPAA2_EQCR_OPRID_SHIFT;
+               seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
+                       DPAA2_EQCR_SEQNUM_SHIFT;
+
+
+               if (!priv->en_loose_ordered) {
+                       qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
+                       qbman_eq_desc_set_response(eqdesc, (uint64_t)
+                               DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
+                               dpio_dev->eqresp_pi]), 1);
+                       qbman_eq_desc_set_token(eqdesc, 1);
+
+                       eqresp_meta = &dpio_dev->eqresp_meta[dpio_dev->eqresp_pi];
+                       eqresp_meta->dpaa2_q = dpaa2_q;
+                       eqresp_meta->mp = m->pool;
+
+                       dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
+                               dpio_dev->eqresp_pi++ : (dpio_dev->eqresp_pi = 0);
+               } else {
+                       qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
+               }
+       } else {
+               dq_idx = *dpaa2_seqn(m) - 1;
+               qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
+               DPAA2_PER_LCORE_DQRR_SIZE--;
+               DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
+       }
+       *dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
+}
+
+
+static uint16_t
+dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
+                       uint16_t nb_ops)
+{
+       /* Function to transmit the frames to given device and VQ*/
+       uint32_t loop;
+       int32_t ret;
+       struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+       uint32_t frames_to_send, num_free_eq_desc, retry_count;
+       struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+       struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+       struct qbman_swp *swp;
+       uint16_t num_tx = 0;
+       uint16_t bpid;
+       struct rte_mempool *mb_pool;
+       struct dpaa2_sec_dev_private *priv =
+                               dpaa2_qp->tx_vq.crypto_data->dev_private;
+
+       if (unlikely(nb_ops == 0))
+               return 0;
+
+       if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+               DPAA2_SEC_ERR("sessionless crypto op not supported");
+               return 0;
+       }
+
+       if (!DPAA2_PER_LCORE_DPIO) {
+               ret = dpaa2_affine_qbman_swp();
+               if (ret) {
+                       DPAA2_SEC_ERR("Failure in affining portal");
+                       return 0;
+               }
+       }
+       swp = DPAA2_PER_LCORE_PORTAL;
+
+       while (nb_ops) {
+               frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
+                       dpaa2_eqcr_size : nb_ops;
+
+               if (!priv->en_loose_ordered) {
+                       if (*dpaa2_seqn((*ops)->sym->m_src)) {
+                               num_free_eq_desc = dpaa2_free_eq_descriptors();
+                               if (num_free_eq_desc < frames_to_send)
+                                       frames_to_send = num_free_eq_desc;
+                       }
+               }
+
+               for (loop = 0; loop < frames_to_send; loop++) {
+                       /*Prepare enqueue descriptor*/
+                       qbman_eq_desc_clear(&eqdesc[loop]);
+                       qbman_eq_desc_set_fq(&eqdesc[loop], dpaa2_qp->tx_vq.fqid);
+
+                       if (*dpaa2_seqn((*ops)->sym->m_src))
+                               dpaa2_sec_set_enqueue_descriptor(
+                                               &dpaa2_qp->tx_vq,
+                                               (*ops)->sym->m_src,
+                                               &eqdesc[loop]);
+                       else
+                               qbman_eq_desc_set_no_orp(&eqdesc[loop],
+                                                        DPAA2_EQ_RESP_ERR_FQ);
+
+                       /*Clear the unused FD fields before sending*/
+                       memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+                       mb_pool = (*ops)->sym->m_src->pool;
+                       bpid = mempool_to_bpid(mb_pool);
+                       ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp);
+                       if (ret) {
+                               DPAA2_SEC_DP_DEBUG("FD build failed\n");
+                               goto skip_tx;
+                       }
+                       ops++;
+               }
+
+               loop = 0;
+               retry_count = 0;
+               while (loop < frames_to_send) {
+                       ret = qbman_swp_enqueue_multiple_desc(swp,
+                                       &eqdesc[loop], &fd_arr[loop],
+                                       frames_to_send - loop);
+                       if (unlikely(ret < 0)) {
+                               retry_count++;
+                               if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+                                       num_tx += loop;
+                                       nb_ops -= loop;
+                                       DPAA2_SEC_DP_DEBUG("Enqueue fail\n");
+                                       /* freeing the fle buffers */
+                                       while (loop < frames_to_send) {
+                                               free_fle(&fd_arr[loop],
+                                                               dpaa2_qp);
+                                               loop++;
+                                       }
+                                       goto skip_tx;
+                               }
+                       } else {
+                               loop += ret;
+                               retry_count = 0;
+                       }
+               }
+
+               num_tx += loop;
+               nb_ops -= loop;
+       }
+
+skip_tx:
+       dpaa2_qp->tx_vq.tx_pkts += num_tx;
+       dpaa2_qp->tx_vq.err_pkts += nb_ops;
+       return num_tx;
+}
+
 static uint16_t
 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
                        uint16_t nb_ops)
@@ -1702,12 +1993,18 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
                }
 
                fd = qbman_result_DQ_fd(dq_storage);
-               ops[num_rx] = sec_fd_to_mbuf(fd);
+               ops[num_rx] = sec_fd_to_mbuf(fd, dpaa2_qp);
 
                if (unlikely(fd->simple.frc)) {
                        /* TODO Parse SEC errors */
-                       DPAA2_SEC_ERR("SEC returned Error - %x",
-                                     fd->simple.frc);
+                       if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_NO_DUMP) {
+                               DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
+                                                fd->simple.frc);
+                               if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_ERR_DUMP)
+                                       dpaa2_sec_dump(ops[num_rx]);
+                       }
+
+                       dpaa2_qp->rx_vq.err_pkts += 1;
                        ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
                } else {
                        ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -1719,7 +2016,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 
        dpaa2_qp->rx_vq.rx_pkts += num_rx;
 
-       DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+       DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64 "\n", num_rx,
+                               dpaa2_qp->rx_vq.err_pkts);
        /*Return the total number of packets received to DPAA2 app*/
        return num_rx;
 }
@@ -1737,6 +2035,7 @@ dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
                dpaa2_free_dq_storage(qp->rx_vq.q_storage);
                rte_free(qp->rx_vq.q_storage);
        }
+       rte_mempool_free(qp->fle_pool);
        rte_free(qp);
 
        dev->data->queue_pairs[queue_pair_id] = NULL;
@@ -1747,7 +2046,7 @@ dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
 /** Setup a queue pair */
 static int
 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
-               __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
+               const struct rte_cryptodev_qp_conf *qp_conf,
                __rte_unused int socket_id)
 {
        struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
@@ -1755,6 +2054,7 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
        struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
        struct dpseci_rx_queue_cfg cfg;
        int32_t retcode;
+       char str[30];
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1794,6 +2094,19 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 
        dev->data->queue_pairs[qp_id] = qp;
 
+       snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d_%d",
+                       getpid(), dev->data->dev_id, qp_id);
+       qp->fle_pool = rte_mempool_create((const char *)str,
+                       qp_conf->nb_descriptors,
+                       FLE_POOL_BUF_SIZE,
+                       FLE_POOL_CACHE_SIZE, 0,
+                       NULL, NULL, NULL, NULL,
+                       SOCKET_ID_ANY, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
+       if (!qp->fle_pool) {
+               DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
+               return -ENOMEM;
+       }
+
        cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
        cfg.user_ctx = (size_t)(&qp->rx_vq);
        retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
@@ -1811,11 +2124,9 @@ dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
 }
 
 static int
-dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
-                     struct rte_crypto_sym_xform *xform,
+dpaa2_sec_cipher_init(struct rte_crypto_sym_xform *xform,
                      dpaa2_sec_session *session)
 {
-       struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
        struct alginfo cipherdata;
        int bufsize, ret = 0;
        struct ctxt_priv *priv;
@@ -1832,14 +2143,12 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
                return -ENOMEM;
        }
 
-       priv->fle_pool = dev_priv->fle_pool;
-
        flc = &priv->flc_desc[0].flc;
 
        session->ctxt_type = DPAA2_SEC_CIPHER;
        session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
                        RTE_CACHE_LINE_SIZE);
-       if (session->cipher_key.data == NULL) {
+       if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
                DPAA2_SEC_ERR("No Memory for cipher key");
                rte_free(priv);
                return -ENOMEM;
@@ -1878,19 +2187,19 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
                                                session->iv.length,
                                                session->dir);
                break;
-       case RTE_CRYPTO_CIPHER_AES_CTR:
-               cipherdata.algtype = OP_ALG_ALGSEL_AES;
-               cipherdata.algmode = OP_ALG_AAI_CTR;
-               session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+       case RTE_CRYPTO_CIPHER_DES_CBC:
+               cipherdata.algtype = OP_ALG_ALGSEL_DES;
+               cipherdata.algmode = OP_ALG_AAI_CBC;
+               session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
                bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
                                                SHR_NEVER, &cipherdata,
                                                session->iv.length,
                                                session->dir);
                break;
-       case RTE_CRYPTO_CIPHER_3DES_CTR:
-               cipherdata.algtype = OP_ALG_ALGSEL_3DES;
+       case RTE_CRYPTO_CIPHER_AES_CTR:
+               cipherdata.algtype = OP_ALG_ALGSEL_AES;
                cipherdata.algmode = OP_ALG_AAI_CTR;
-               session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CTR;
+               session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
                bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
                                                SHR_NEVER, &cipherdata,
                                                session->iv.length,
@@ -1914,6 +2223,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
        case RTE_CRYPTO_CIPHER_AES_F8:
        case RTE_CRYPTO_CIPHER_AES_ECB:
        case RTE_CRYPTO_CIPHER_3DES_ECB:
+       case RTE_CRYPTO_CIPHER_3DES_CTR:
        case RTE_CRYPTO_CIPHER_AES_XTS:
        case RTE_CRYPTO_CIPHER_ARC4:
        case RTE_CRYPTO_CIPHER_NULL:
@@ -1951,11 +2261,9 @@ error_out:
 }
 
 static int
-dpaa2_sec_auth_init(struct rte_cryptodev *dev,
-                   struct rte_crypto_sym_xform *xform,
+dpaa2_sec_auth_init(struct rte_crypto_sym_xform *xform,
                    dpaa2_sec_session *session)
 {
-       struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
        struct alginfo authdata;
        int bufsize, ret = 0;
        struct ctxt_priv *priv;
@@ -1973,25 +2281,26 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
                return -ENOMEM;
        }
 
-       priv->fle_pool = dev_priv->fle_pool;
        flc = &priv->flc_desc[DESC_INITFINAL].flc;
 
        session->ctxt_type = DPAA2_SEC_AUTH;
-       session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
+       session->auth_key.length = xform->auth.key.length;
+       if (xform->auth.key.length) {
+               session->auth_key.data = rte_zmalloc(NULL,
+                       xform->auth.key.length,
                        RTE_CACHE_LINE_SIZE);
-       if (session->auth_key.data == NULL) {
-               DPAA2_SEC_ERR("Unable to allocate memory for auth key");
-               rte_free(priv);
-               return -ENOMEM;
+               if (session->auth_key.data == NULL) {
+                       DPAA2_SEC_ERR("Unable to allocate memory for auth key");
+                       rte_free(priv);
+                       return -ENOMEM;
+               }
+               memcpy(session->auth_key.data, xform->auth.key.data,
+                      xform->auth.key.length);
+               authdata.key = (size_t)session->auth_key.data;
+               authdata.key_enc_flags = 0;
+               authdata.key_type = RTA_DATA_IMM;
        }
-       session->auth_key.length = xform->auth.key.length;
-
-       memcpy(session->auth_key.data, xform->auth.key.data,
-              xform->auth.key.length);
-       authdata.key = (size_t)session->auth_key.data;
        authdata.keylen = session->auth_key.length;
-       authdata.key_enc_flags = 0;
-       authdata.key_type = RTA_DATA_IMM;
 
        session->digest_length = xform->auth.digest_length;
        session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
@@ -2074,18 +2383,84 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
                                           !session->dir,
                                           session->digest_length);
                break;
-       case RTE_CRYPTO_AUTH_KASUMI_F9:
-       case RTE_CRYPTO_AUTH_NULL:
        case RTE_CRYPTO_AUTH_SHA1:
+               authdata.algtype = OP_ALG_ALGSEL_SHA1;
+               authdata.algmode = OP_ALG_AAI_HASH;
+               session->auth_alg = RTE_CRYPTO_AUTH_SHA1;
+               bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
+                                          1, 0, SHR_NEVER, &authdata,
+                                          !session->dir,
+                                          session->digest_length);
+               break;
+       case RTE_CRYPTO_AUTH_MD5:
+               authdata.algtype = OP_ALG_ALGSEL_MD5;
+               authdata.algmode = OP_ALG_AAI_HASH;
+               session->auth_alg = RTE_CRYPTO_AUTH_MD5;
+               bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
+                                          1, 0, SHR_NEVER, &authdata,
+                                          !session->dir,
+                                          session->digest_length);
+               break;
        case RTE_CRYPTO_AUTH_SHA256:
+               authdata.algtype = OP_ALG_ALGSEL_SHA256;
+               authdata.algmode = OP_ALG_AAI_HASH;
+               session->auth_alg = RTE_CRYPTO_AUTH_SHA256;
+               bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
+                                          1, 0, SHR_NEVER, &authdata,
+                                          !session->dir,
+                                          session->digest_length);
+               break;
+       case RTE_CRYPTO_AUTH_SHA384:
+               authdata.algtype = OP_ALG_ALGSEL_SHA384;
+               authdata.algmode = OP_ALG_AAI_HASH;
+               session->auth_alg = RTE_CRYPTO_AUTH_SHA384;
+               bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
+                                          1, 0, SHR_NEVER, &authdata,
+                                          !session->dir,
+                                          session->digest_length);
+               break;
        case RTE_CRYPTO_AUTH_SHA512:
+               authdata.algtype = OP_ALG_ALGSEL_SHA512;
+               authdata.algmode = OP_ALG_AAI_HASH;
+               session->auth_alg = RTE_CRYPTO_AUTH_SHA512;
+               bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
+                                          1, 0, SHR_NEVER, &authdata,
+                                          !session->dir,
+                                          session->digest_length);
+               break;
        case RTE_CRYPTO_AUTH_SHA224:
-       case RTE_CRYPTO_AUTH_SHA384:
-       case RTE_CRYPTO_AUTH_MD5:
-       case RTE_CRYPTO_AUTH_AES_GMAC:
+               authdata.algtype = OP_ALG_ALGSEL_SHA224;
+               authdata.algmode = OP_ALG_AAI_HASH;
+               session->auth_alg = RTE_CRYPTO_AUTH_SHA224;
+               bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
+                                          1, 0, SHR_NEVER, &authdata,
+                                          !session->dir,
+                                          session->digest_length);
+               break;
        case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+               authdata.algtype = OP_ALG_ALGSEL_AES;
+               authdata.algmode = OP_ALG_AAI_XCBC_MAC;
+               session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
+               bufsize = cnstr_shdsc_aes_mac(
+                                       priv->flc_desc[DESC_INITFINAL].desc,
+                                       1, 0, SHR_NEVER, &authdata,
+                                       !session->dir,
+                                       session->digest_length);
+               break;
        case RTE_CRYPTO_AUTH_AES_CMAC:
+               authdata.algtype = OP_ALG_ALGSEL_AES;
+               authdata.algmode = OP_ALG_AAI_CMAC;
+               session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+               bufsize = cnstr_shdsc_aes_mac(
+                                          priv->flc_desc[DESC_INITFINAL].desc,
+                                          1, 0, SHR_NEVER, &authdata,
+                                          !session->dir,
+                                          session->digest_length);
+               break;
        case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+       case RTE_CRYPTO_AUTH_AES_GMAC:
+       case RTE_CRYPTO_AUTH_KASUMI_F9:
+       case RTE_CRYPTO_AUTH_NULL:
                DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
                              xform->auth.algo);
                ret = -ENOTSUP;
@@ -2121,12 +2496,10 @@ error_out:
 }
 
 static int
-dpaa2_sec_aead_init(struct rte_cryptodev *dev,
-                   struct rte_crypto_sym_xform *xform,
+dpaa2_sec_aead_init(struct rte_crypto_sym_xform *xform,
                    dpaa2_sec_session *session)
 {
        struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
-       struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
        struct alginfo aeaddata;
        int bufsize;
        struct ctxt_priv *priv;
@@ -2150,7 +2523,6 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
                return -ENOMEM;
        }
 
-       priv->fle_pool = dev_priv->fle_pool;
        flc = &priv->flc_desc[0].flc;
 
        session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
@@ -2194,7 +2566,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 
        priv->flc_desc[0].desc[0] = aeaddata.keylen;
        err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
-                              MIN_JOB_DESC_SIZE,
+                              DESC_JOB_IO_LEN,
                               (unsigned int *)priv->flc_desc[0].desc,
                               &priv->flc_desc[0].desc[1], 1);
 
@@ -2246,11 +2618,9 @@ error_out:
 
 
 static int
-dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
-                   struct rte_crypto_sym_xform *xform,
+dpaa2_sec_aead_chain_init(struct rte_crypto_sym_xform *xform,
                    dpaa2_sec_session *session)
 {
-       struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
        struct alginfo authdata, cipherdata;
        int bufsize;
        struct ctxt_priv *priv;
@@ -2288,7 +2658,6 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
                return -ENOMEM;
        }
 
-       priv->fle_pool = dev_priv->fle_pool;
        flc = &priv->flc_desc[0].flc;
 
        session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
@@ -2352,6 +2721,17 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
                session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
                break;
        case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+               authdata.algtype = OP_ALG_ALGSEL_AES;
+               authdata.algmode = OP_ALG_AAI_XCBC_MAC;
+               session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
+               break;
+       case RTE_CRYPTO_AUTH_AES_CMAC:
+               authdata.algtype = OP_ALG_ALGSEL_AES;
+               authdata.algmode = OP_ALG_AAI_CMAC;
+               session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+               break;
+       case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+       case RTE_CRYPTO_AUTH_AES_GMAC:
        case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
        case RTE_CRYPTO_AUTH_NULL:
        case RTE_CRYPTO_AUTH_SHA1:
@@ -2360,10 +2740,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
        case RTE_CRYPTO_AUTH_SHA224:
        case RTE_CRYPTO_AUTH_SHA384:
        case RTE_CRYPTO_AUTH_MD5:
-       case RTE_CRYPTO_AUTH_AES_GMAC:
        case RTE_CRYPTO_AUTH_KASUMI_F9:
-       case RTE_CRYPTO_AUTH_AES_CMAC:
-       case RTE_CRYPTO_AUTH_AES_CBC_MAC:
        case RTE_CRYPTO_AUTH_ZUC_EIA3:
                DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
                              auth_xform->algo);
@@ -2391,6 +2768,11 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
                cipherdata.algmode = OP_ALG_AAI_CBC;
                session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
                break;
+       case RTE_CRYPTO_CIPHER_DES_CBC:
+               cipherdata.algtype = OP_ALG_ALGSEL_DES;
+               cipherdata.algmode = OP_ALG_AAI_CBC;
+               session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
+               break;
        case RTE_CRYPTO_CIPHER_AES_CTR:
                cipherdata.algtype = OP_ALG_ALGSEL_AES;
                cipherdata.algmode = OP_ALG_AAI_CTR;
@@ -2400,6 +2782,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
        case RTE_CRYPTO_CIPHER_ZUC_EEA3:
        case RTE_CRYPTO_CIPHER_NULL:
        case RTE_CRYPTO_CIPHER_3DES_ECB:
+       case RTE_CRYPTO_CIPHER_3DES_CTR:
        case RTE_CRYPTO_CIPHER_AES_ECB:
        case RTE_CRYPTO_CIPHER_KASUMI_F8:
                DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
@@ -2418,7 +2801,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
        priv->flc_desc[0].desc[0] = cipherdata.keylen;
        priv->flc_desc[0].desc[1] = authdata.keylen;
        err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
-                              MIN_JOB_DESC_SIZE,
+                              DESC_JOB_IO_LEN,
                               (unsigned int *)priv->flc_desc[0].desc,
                               &priv->flc_desc[0].desc[2], 2);
 
@@ -2480,8 +2863,7 @@ error_out:
 }
 
 static int
-dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
-                           struct rte_crypto_sym_xform *xform, void *sess)
+dpaa2_sec_set_session_parameters(struct rte_crypto_sym_xform *xform, void *sess)
 {
        dpaa2_sec_session *session = sess;
        int ret;
@@ -2499,37 +2881,37 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
 
        /* Cipher Only */
        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
-               ret = dpaa2_sec_cipher_init(dev, xform, session);
+               ret = dpaa2_sec_cipher_init(xform, session);
 
        /* Authentication Only */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                   xform->next == NULL) {
-               ret = dpaa2_sec_auth_init(dev, xform, session);
+               ret = dpaa2_sec_auth_init(xform, session);
 
        /* Cipher then Authenticate */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                session->ext_params.aead_ctxt.auth_cipher_text = true;
                if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
-                       ret = dpaa2_sec_auth_init(dev, xform, session);
+                       ret = dpaa2_sec_auth_init(xform, session);
                else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
-                       ret = dpaa2_sec_cipher_init(dev, xform, session);
+                       ret = dpaa2_sec_cipher_init(xform, session);
                else
-                       ret = dpaa2_sec_aead_chain_init(dev, xform, session);
+                       ret = dpaa2_sec_aead_chain_init(xform, session);
        /* Authenticate then Cipher */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
                session->ext_params.aead_ctxt.auth_cipher_text = false;
                if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
-                       ret = dpaa2_sec_cipher_init(dev, xform, session);
+                       ret = dpaa2_sec_cipher_init(xform, session);
                else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
-                       ret = dpaa2_sec_auth_init(dev, xform, session);
+                       ret = dpaa2_sec_auth_init(xform, session);
                else
-                       ret = dpaa2_sec_aead_chain_init(dev, xform, session);
+                       ret = dpaa2_sec_aead_chain_init(xform, session);
        /* AEAD operation for AES-GCM kind of Algorithms */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
                   xform->next == NULL) {
-               ret = dpaa2_sec_aead_init(dev, xform, session);
+               ret = dpaa2_sec_aead_init(xform, session);
 
        } else {
                DPAA2_SEC_ERR("Invalid crypto type");
@@ -2539,7 +2921,7 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
        return ret;
 }
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 static int
 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
                        dpaa2_sec_session *session,
@@ -2690,14 +3072,18 @@ dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
                authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
                authdata->algmode = OP_ALG_AAI_HMAC;
                break;
+       case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+               authdata->algtype = OP_PCL_IPSEC_AES_XCBC_MAC_96;
+               authdata->algmode = OP_ALG_AAI_XCBC_MAC;
+               break;
        case RTE_CRYPTO_AUTH_AES_CMAC:
                authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
+               authdata->algmode = OP_ALG_AAI_CMAC;
                break;
        case RTE_CRYPTO_AUTH_NULL:
                authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
                break;
        case RTE_CRYPTO_AUTH_SHA224_HMAC:
-       case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
        case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
        case RTE_CRYPTO_AUTH_SHA1:
        case RTE_CRYPTO_AUTH_SHA256:
@@ -2731,6 +3117,10 @@ dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
                cipherdata->algtype = OP_PCL_IPSEC_3DES;
                cipherdata->algmode = OP_ALG_AAI_CBC;
                break;
+       case RTE_CRYPTO_CIPHER_DES_CBC:
+               cipherdata->algtype = OP_PCL_IPSEC_DES;
+               cipherdata->algmode = OP_ALG_AAI_CBC;
+               break;
        case RTE_CRYPTO_CIPHER_AES_CTR:
                cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
                cipherdata->algmode = OP_ALG_AAI_CTR;
@@ -2741,6 +3131,7 @@ dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
        case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
        case RTE_CRYPTO_CIPHER_ZUC_EEA3:
        case RTE_CRYPTO_CIPHER_3DES_ECB:
+       case RTE_CRYPTO_CIPHER_3DES_CTR:
        case RTE_CRYPTO_CIPHER_AES_ECB:
        case RTE_CRYPTO_CIPHER_KASUMI_F8:
                DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
@@ -2755,12 +3146,6 @@ dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
        return 0;
 }
 
-#ifdef RTE_LIBRTE_SECURITY_TEST
-static uint8_t aes_cbc_iv[] = {
-       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
-#endif
-
 static int
 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
                            struct rte_security_session_conf *conf,
@@ -2775,7 +3160,6 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
        struct alginfo authdata, cipherdata;
        int bufsize;
        struct sec_flow_context *flc;
-       struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
        int ret = -1;
 
        PMD_INIT_FUNC_TRACE();
@@ -2790,9 +3174,14 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
                return -ENOMEM;
        }
 
-       priv->fle_pool = dev_priv->fle_pool;
        flc = &priv->flc_desc[0].flc;
 
+       if (ipsec_xform->life.bytes_hard_limit != 0 ||
+           ipsec_xform->life.bytes_soft_limit != 0 ||
+           ipsec_xform->life.packets_hard_limit != 0 ||
+           ipsec_xform->life.packets_soft_limit != 0)
+               return -ENOTSUP;
+
        memset(session, 0, sizeof(dpaa2_sec_session));
 
        if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
@@ -2851,8 +3240,9 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
                encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
                        PDBOPTS_ESP_OIHI_PDB_INL |
                        PDBOPTS_ESP_IVSRC |
-                       PDBHMO_ESP_ENCAP_DTTL |
                        PDBHMO_ESP_SNR;
+               if (ipsec_xform->options.dec_ttl)
+                       encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
                if (ipsec_xform->options.esn)
                        encap_pdb.options |= PDBOPTS_ESP_ESN;
                encap_pdb.spi = ipsec_xform->spi;
@@ -2900,7 +3290,8 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
                }
 
                bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
-                               1, 0, SHR_SERIAL, &encap_pdb,
+                               1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
+                               SHR_WAIT : SHR_SERIAL, &encap_pdb,
                                hdr, &cipherdata, &authdata);
        } else if (ipsec_xform->direction ==
                        RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
@@ -2933,6 +3324,10 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
                        uint32_t win_sz;
                        win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
 
+                       if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) {
+                               DPAA2_SEC_INFO("Max Anti replay Win sz = 128");
+                               win_sz = 128;
+                       }
                        switch (win_sz) {
                        case 1:
                        case 2:
@@ -2945,13 +3340,24 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
                        case 64:
                                decap_pdb.options |= PDBOPTS_ESP_ARS64;
                                break;
+                       case 256:
+                               decap_pdb.options |= PDBOPTS_ESP_ARS256;
+                               break;
+                       case 512:
+                               decap_pdb.options |= PDBOPTS_ESP_ARS512;
+                               break;
+                       case 1024:
+                               decap_pdb.options |= PDBOPTS_ESP_ARS1024;
+                               break;
+                       case 128:
                        default:
                                decap_pdb.options |= PDBOPTS_ESP_ARS128;
                        }
                }
                session->dir = DIR_DEC;
                bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
-                               1, 0, SHR_SERIAL,
+                               1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
+                               SHR_WAIT : SHR_SERIAL,
                                &decap_pdb, &cipherdata, &authdata);
        } else
                goto out;
@@ -2997,10 +3403,9 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
        struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
        struct rte_crypto_sym_xform *xform = conf->crypto_xform;
        struct rte_crypto_auth_xform *auth_xform = NULL;
-       struct rte_crypto_cipher_xform *cipher_xform;
+       struct rte_crypto_cipher_xform *cipher_xform = NULL;
        dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
        struct ctxt_priv *priv;
-       struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
        struct alginfo authdata, cipherdata;
        struct alginfo *p_authdata = NULL;
        int bufsize = -1;
@@ -3025,22 +3430,23 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
                return -ENOMEM;
        }
 
-       priv->fle_pool = dev_priv->fle_pool;
        flc = &priv->flc_desc[0].flc;
 
        /* find xfrm types */
-       if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
+       if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
                cipher_xform = &xform->cipher;
-       } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
-                  xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
-               session->ext_params.aead_ctxt.auth_cipher_text = true;
-               cipher_xform = &xform->cipher;
-               auth_xform = &xform->next->auth;
-       } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
-                  xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
-               session->ext_params.aead_ctxt.auth_cipher_text = false;
-               cipher_xform = &xform->next->cipher;
+               if (xform->next != NULL &&
+                       xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+                       session->ext_params.aead_ctxt.auth_cipher_text = true;
+                       auth_xform = &xform->next->auth;
+               }
+       } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                auth_xform = &xform->auth;
+               if (xform->next != NULL &&
+                       xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+                       session->ext_params.aead_ctxt.auth_cipher_text = false;
+                       cipher_xform = &xform->next->cipher;
+               }
        } else {
                DPAA2_SEC_ERR("Invalid crypto type");
                return -EINVAL;
@@ -3079,7 +3485,8 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
        session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
        session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
        /* hfv ovd offset location is stored in iv.offset value*/
-       session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
+       if (cipher_xform)
+               session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
 
        cipherdata.key = (size_t)session->cipher_key.data;
        cipherdata.keylen = session->cipher_key.length;
@@ -3156,6 +3563,30 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
                goto out;
        }
 
+       if (pdcp_xform->sdap_enabled) {
+               int nb_keys_to_inline =
+                       rta_inline_pdcp_sdap_query(authdata.algtype,
+                                       cipherdata.algtype,
+                                       session->pdcp.sn_size,
+                                       session->pdcp.hfn_ovd);
+               if (nb_keys_to_inline >= 1) {
+                       cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
+                       cipherdata.key_type = RTA_DATA_PTR;
+               }
+               if (nb_keys_to_inline >= 2) {
+                       authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
+                       authdata.key_type = RTA_DATA_PTR;
+               }
+       } else {
+               if (rta_inline_pdcp_query(authdata.algtype,
+                                       cipherdata.algtype,
+                                       session->pdcp.sn_size,
+                                       session->pdcp.hfn_ovd)) {
+                       cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
+                       cipherdata.key_type = RTA_DATA_PTR;
+               }
+       }
+
        if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
                if (session->dir == DIR_ENC)
                        bufsize = cnstr_shdsc_pdcp_c_plane_encap(
@@ -3165,8 +3596,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
                                        pdcp_xform->bearer,
                                        pdcp_xform->pkt_dir,
                                        pdcp_xform->hfn_threshold,
-                                       &cipherdata, &authdata,
-                                       0);
+                                       &cipherdata, &authdata);
                else if (session->dir == DIR_DEC)
                        bufsize = cnstr_shdsc_pdcp_c_plane_decap(
                                        priv->flc_desc[0].desc, 1, swap,
@@ -3175,27 +3605,51 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
                                        pdcp_xform->bearer,
                                        pdcp_xform->pkt_dir,
                                        pdcp_xform->hfn_threshold,
-                                       &cipherdata, &authdata,
-                                       0);
+                                       &cipherdata, &authdata);
+
+       } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
+               bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc,
+                                                    1, swap, &authdata);
        } else {
-               if (session->dir == DIR_ENC)
-                       bufsize = cnstr_shdsc_pdcp_u_plane_encap(
+               if (session->dir == DIR_ENC) {
+                       if (pdcp_xform->sdap_enabled)
+                               bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap(
                                        priv->flc_desc[0].desc, 1, swap,
                                        session->pdcp.sn_size,
                                        pdcp_xform->hfn,
                                        pdcp_xform->bearer,
                                        pdcp_xform->pkt_dir,
                                        pdcp_xform->hfn_threshold,
-                                       &cipherdata, p_authdata, 0);
-               else if (session->dir == DIR_DEC)
-                       bufsize = cnstr_shdsc_pdcp_u_plane_decap(
+                                       &cipherdata, p_authdata);
+                       else
+                               bufsize = cnstr_shdsc_pdcp_u_plane_encap(
+                                       priv->flc_desc[0].desc, 1, swap,
+                                       session->pdcp.sn_size,
+                                       pdcp_xform->hfn,
+                                       pdcp_xform->bearer,
+                                       pdcp_xform->pkt_dir,
+                                       pdcp_xform->hfn_threshold,
+                                       &cipherdata, p_authdata);
+               } else if (session->dir == DIR_DEC) {
+                       if (pdcp_xform->sdap_enabled)
+                               bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
                                        priv->flc_desc[0].desc, 1, swap,
                                        session->pdcp.sn_size,
                                        pdcp_xform->hfn,
                                        pdcp_xform->bearer,
                                        pdcp_xform->pkt_dir,
                                        pdcp_xform->hfn_threshold,
-                                       &cipherdata, p_authdata, 0);
+                                       &cipherdata, p_authdata);
+                       else
+                               bufsize = cnstr_shdsc_pdcp_u_plane_decap(
+                                       priv->flc_desc[0].desc, 1, swap,
+                                       session->pdcp.sn_size,
+                                       pdcp_xform->hfn,
+                                       pdcp_xform->bearer,
+                                       pdcp_xform->pkt_dir,
+                                       pdcp_xform->hfn_threshold,
+                                       &cipherdata, p_authdata);
+               }
        }
 
        if (bufsize < 0) {
@@ -3313,7 +3767,7 @@ dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
                return -ENOMEM;
        }
 
-       ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
+       ret = dpaa2_sec_set_session_parameters(xform, sess_private_data);
        if (ret != 0) {
                DPAA2_SEC_ERR("Failed to configure session parameters");
                /* Return session to mempool */
@@ -3372,6 +3826,10 @@ dpaa2_sec_dev_start(struct rte_cryptodev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
+       /* Change the tx burst function if ordered queues are used */
+       if (priv->en_ordered)
+               dev->enqueue_burst = dpaa2_sec_enqueue_burst_ordered;
+
        memset(&attr, 0, sizeof(struct dpseci_attr));
 
        ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
@@ -3430,32 +3888,10 @@ dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
 }
 
/* Crypto-dev close callback.
 *
 * Intentionally a no-op: the actual teardown (detaching the DPSECI from
 * its resources, dpseci_close() on the MC object and freeing the dpseci
 * handle) is performed in dpaa2_sec_uninit() at device removal time, so
 * there is nothing left to do here.
 *
 * Always returns 0.
 */
static int
dpaa2_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}
 
@@ -3481,7 +3917,7 @@ void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
                         struct rte_cryptodev_stats *stats)
 {
        struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
-       struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+       struct fsl_mc_io dpseci;
        struct dpseci_sec_counters counters = {0};
        struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
                                        dev->data->queue_pairs;
@@ -3493,7 +3929,7 @@ void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
                return;
        }
        for (i = 0; i < dev->data->nb_queue_pairs; i++) {
-               if (qp[i] == NULL) {
+               if (qp == NULL || qp[i] == NULL) {
                        DPAA2_SEC_DEBUG("Uninitialised queue pair");
                        continue;
                }
@@ -3504,7 +3940,12 @@ void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
                stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
        }
 
-       ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
+       /* In case a secondary process accesses the stats, the MCP portal in
+        * priv->hw may hold the primary process address. Use the secondary
+        * process based MCP portal address for this object instead.
+        */
+       dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+       ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token,
                                      &counters);
        if (ret) {
                DPAA2_SEC_ERR("SEC counters failed");
@@ -3557,6 +3998,7 @@ dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
                                 struct dpaa2_queue *rxq,
                                 struct rte_event *ev)
 {
+       struct dpaa2_sec_qp *qp;
        /* Prefetching mbuf */
        rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
@@ -3564,6 +4006,7 @@ dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
        /* Prefetching ipsec crypto_op stored in priv data of mbuf */
        rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
 
+       qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
        ev->flow_id = rxq->ev.flow_id;
        ev->sub_event_type = rxq->ev.sub_event_type;
        ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
@@ -3571,7 +4014,7 @@ dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
        ev->sched_type = rxq->ev.sched_type;
        ev->queue_id = rxq->ev.queue_id;
        ev->priority = rxq->ev.priority;
-       ev->event_ptr = sec_fd_to_mbuf(fd);
+       ev->event_ptr = sec_fd_to_mbuf(fd, qp);
 
        qbman_swp_dqrr_consume(swp, dq);
 }
@@ -3583,7 +4026,8 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
                                 struct rte_event *ev)
 {
        uint8_t dqrr_index;
-       struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
+       struct dpaa2_sec_qp *qp;
+       struct rte_crypto_op *crypto_op;
        /* Prefetching mbuf */
        rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
@@ -3591,6 +4035,7 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
        /* Prefetching ipsec crypto_op stored in priv data of mbuf */
        rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
 
+       qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
        ev->flow_id = rxq->ev.flow_id;
        ev->sub_event_type = rxq->ev.sub_event_type;
        ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
@@ -3599,12 +4044,50 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
        ev->queue_id = rxq->ev.queue_id;
        ev->priority = rxq->ev.priority;
 
-       ev->event_ptr = sec_fd_to_mbuf(fd);
+       crypto_op = sec_fd_to_mbuf(fd, qp);
        dqrr_index = qbman_get_dqrr_idx(dq);
-       crypto_op->sym->m_src->seqn = dqrr_index + 1;
+       *dpaa2_seqn(crypto_op->sym->m_src) = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
        DPAA2_PER_LCORE_DQRR_SIZE++;
        DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
        DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
+       ev->event_ptr = crypto_op;
+}
+
/* Dequeue callback used when the SEC rx queue is attached to an
 * ORDERED-scheduled event queue.
 *
 * Converts the received frame descriptor back into the originating
 * crypto op, fills the event metadata from the configuration stored on
 * the rx queue, and stashes the order-restoration-point id and sequence
 * number from the dequeue result into the mbuf's seqn word so that a
 * later enqueue can restore the original frame order.
 *
 * @param swp   software portal the frame was dequeued from
 * @param fd    received frame descriptor
 * @param dq    dequeue result (carries ODP id and sequence number)
 * @param rxq   SEC rx queue embedded in the owning dpaa2_sec_qp
 * @param ev    event to fill for delivery to the application
 */
static void __rte_hot
dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
				const struct qbman_fd *fd,
				const struct qbman_result *dq,
				struct dpaa2_queue *rxq,
				struct rte_event *ev)
{
	struct rte_crypto_op *crypto_op;
	struct dpaa2_sec_qp *qp;

	/* Prefetching mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));

	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));

	/* Recover the queue pair that owns this rx queue */
	qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;
	crypto_op = sec_fd_to_mbuf(fd, qp);

	/* Record ORP flag plus the ODP id and sequence number of this
	 * frame so the ordered tx path can re-sequence it on enqueue.
	 */
	*dpaa2_seqn(crypto_op->sym->m_src) = DPAA2_ENQUEUE_FLAG_ORP;
	*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_odpid(dq) <<
		DPAA2_EQCR_OPRID_SHIFT;
	*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_seqnum(dq) <<
		DPAA2_EQCR_SEQNUM_SHIFT;

	qbman_swp_dqrr_consume(swp, dq);
	ev->event_ptr = crypto_op;
}
 
 int
@@ -3624,6 +4107,8 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
                qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
        else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
                qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
+       else if (event->sched_type == RTE_SCHED_TYPE_ORDERED)
+               qp->rx_vq.cb = dpaa2_sec_process_ordered_event;
        else
                return -EINVAL;
 
@@ -3642,6 +4127,37 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
                cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
                cfg.order_preservation_en = 1;
        }
+
+       if (event->sched_type == RTE_SCHED_TYPE_ORDERED) {
+               struct opr_cfg ocfg;
+
+               /* Restoration window size = 256 frames */
+               ocfg.oprrws = 3;
+               /* Restoration window size = 512 frames for LX2 */
+               if (dpaa2_svr_family == SVR_LX2160A)
+                       ocfg.oprrws = 4;
+               /* Auto advance NESN window enabled */
+               ocfg.oa = 1;
+               /* Late arrival window size disabled */
+               ocfg.olws = 0;
+               /* ORL resource exhaustion advance NESN disabled */
+               ocfg.oeane = 0;
+
+               if (priv->en_loose_ordered)
+                       ocfg.oloe = 1;
+               else
+                       ocfg.oloe = 0;
+
+               ret = dpseci_set_opr(dpseci, CMD_PRI_LOW, priv->token,
+                                  qp_id, OPR_OPT_CREATE, &ocfg);
+               if (ret) {
+                       RTE_LOG(ERR, PMD, "Error setting opr: ret: %d\n", ret);
+                       return ret;
+               }
+               qp->tx_vq.cb_eqresp_free = dpaa2_sec_free_eqresp_buf;
+               priv->en_ordered = 1;
+       }
+
        ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
                                  qp_id, &cfg);
        if (ret) {
@@ -3688,9 +4204,12 @@ static struct rte_cryptodev_ops crypto_ops = {
        .sym_session_get_size     = dpaa2_sec_sym_session_get_size,
        .sym_session_configure    = dpaa2_sec_sym_session_configure,
        .sym_session_clear        = dpaa2_sec_sym_session_clear,
+       /* Raw data-path API related operations */
+       .sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size,
+       .sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx,
 };
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 static const struct rte_security_capability *
 dpaa2_sec_capabilities_get(void *device __rte_unused)
 {
@@ -3710,11 +4229,30 @@ static const struct rte_security_ops dpaa2_sec_security_ops = {
 static int
 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
 {
-       struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+       struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+       struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+       int ret;
 
-       rte_free(dev->security_ctx);
+       PMD_INIT_FUNC_TRACE();
 
-       rte_mempool_free(internals->fle_pool);
+       /* Function is reverse of dpaa2_sec_dev_init.
+        * It does the following:
+        * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
+        * 2. Close the DPSECI device
+        * 3. Free the allocated resources.
+        */
+
+       /*Close the device at underlying layer*/
+       ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
+       if (ret) {
+               DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
+               return -1;
+       }
+
+       /*Free the allocated memory for ethernet private data and dpseci*/
+       priv->hw = NULL;
+       rte_free(dpseci);
+       rte_free(dev->security_ctx);
 
        DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
                       dev->data->name, rte_socket_id());
@@ -3722,20 +4260,66 @@ dpaa2_sec_uninit(const struct rte_cryptodev *dev)
        return 0;
 }
 
+static int
+check_devargs_handler(const char *key, const char *value,
+                     void *opaque)
+{
+       struct rte_cryptodev *dev = (struct rte_cryptodev *)opaque;
+       struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+
+       if (!strcmp(key, "drv_strict_order")) {
+               priv->en_loose_ordered = false;
+       } else if (!strcmp(key, "drv_dump_mode")) {
+               dpaa2_sec_dp_dump = atoi(value);
+               if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) {
+                       DPAA2_SEC_WARN("WARN: DPAA2_SEC_DP_DUMP_LEVEL is not "
+                                     "supported, changing to FULL error"
+                                     " prints\n");
+                       dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP;
+               }
+       } else
+               return -1;
+
+       return 0;
+}
+
+static void
+dpaa2_sec_get_devargs(struct rte_cryptodev *cryptodev, const char *key)
+{
+       struct rte_kvargs *kvlist;
+       struct rte_devargs *devargs;
+
+       devargs = cryptodev->device->devargs;
+       if (!devargs)
+               return;
+
+       kvlist = rte_kvargs_parse(devargs->args, NULL);
+       if (!kvlist)
+               return;
+
+       if (!rte_kvargs_count(kvlist, key)) {
+               rte_kvargs_free(kvlist);
+               return;
+       }
+
+       rte_kvargs_process(kvlist, key,
+                       check_devargs_handler, (void *)cryptodev);
+       rte_kvargs_free(kvlist);
+}
+
 static int
 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 {
        struct dpaa2_sec_dev_private *internals;
        struct rte_device *dev = cryptodev->device;
        struct rte_dpaa2_device *dpaa2_dev;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
        struct rte_security_ctx *security_instance;
 #endif
        struct fsl_mc_io *dpseci;
        uint16_t token;
        struct dpseci_attr attr;
        int retcode, hw_id;
-       char str[30];
 
        PMD_INIT_FUNC_TRACE();
        dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
@@ -3750,6 +4334,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
                        RTE_CRYPTODEV_FF_HW_ACCELERATED |
                        RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
                        RTE_CRYPTODEV_FF_SECURITY |
+                       RTE_CRYPTODEV_FF_SYM_RAW_DP |
                        RTE_CRYPTODEV_FF_IN_PLACE_SGL |
                        RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
                        RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
@@ -3767,7 +4352,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
                DPAA2_SEC_DEBUG("Device already init by primary process");
                return 0;
        }
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
        /* Initialize security_ctx only for primary process*/
        security_instance = rte_malloc("rte_security_instances_ops",
                                sizeof(struct rte_security_ctx), 0);
@@ -3786,7 +4371,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
                        "Error in allocating the memory for dpsec object");
                return -ENOMEM;
        }
-       dpseci->regs = rte_mcp_ptr_list[0];
+       dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
 
        retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
        if (retcode != 0) {
@@ -3808,20 +4393,10 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
        cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
        internals->hw = dpseci;
        internals->token = token;
+       internals->en_loose_ordered = true;
 
-       snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
-                       getpid(), cryptodev->data->dev_id);
-       internals->fle_pool = rte_mempool_create((const char *)str,
-                       FLE_POOL_NUM_BUFS,
-                       FLE_POOL_BUF_SIZE,
-                       FLE_POOL_CACHE_SIZE, 0,
-                       NULL, NULL, NULL, NULL,
-                       SOCKET_ID_ANY, 0);
-       if (!internals->fle_pool) {
-               DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
-               goto init_error;
-       }
-
+       dpaa2_sec_get_devargs(cryptodev, DRIVER_DUMP_MODE);
+       dpaa2_sec_get_devargs(cryptodev, DRIVER_STRICT_ORDER);
        DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
        return 0;
 
@@ -3875,8 +4450,10 @@ cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
 
        /* Invoke PMD device initialization function */
        retval = dpaa2_sec_dev_init(cryptodev);
-       if (retval == 0)
+       if (retval == 0) {
+               rte_cryptodev_pmd_probing_finish(cryptodev);
                return 0;
+       }
 
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(cryptodev->data->dev_private);
@@ -3918,11 +4495,7 @@ static struct cryptodev_driver dpaa2_sec_crypto_drv;
 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
                rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
-
-RTE_INIT(dpaa2_sec_init_log)
-{
-       /* Bus level logs */
-       dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
-       if (dpaa2_logtype_sec >= 0)
-               rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
-}
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA2_SEC_PMD,
+               DRIVER_STRICT_ORDER "=<int>"
+               DRIVER_DUMP_MODE "=<int>");
+RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);