crypto/dpaa2_sec: per queue pair FLE pool
[dpdk.git] / drivers / crypto / dpaa2_sec / dpaa2_sec_dpseci.c
index 03fef5e..8444f1a 100644 (file)
@@ -65,11 +65,10 @@ uint8_t cryptodev_driver_id;
 uint8_t dpaa2_sec_dp_dump = DPAA2_SEC_DP_ERR_DUMP;
 
 static inline void
-free_fle(const struct qbman_fd *fd)
+free_fle(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp)
 {
        struct qbman_fle *fle;
        struct rte_crypto_op *op;
-       struct ctxt_priv *priv;
 
 #ifdef RTE_LIB_SECURITY
        if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
@@ -78,10 +77,9 @@ free_fle(const struct qbman_fd *fd)
        fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
        op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
        /* free the fle memory */
-       if (likely(rte_pktmbuf_is_contiguous(op->sym->m_src))) {
-               priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
-               rte_mempool_put(priv->fle_pool, (void *)(fle-1));
-       } else
+       if (likely(rte_pktmbuf_is_contiguous(op->sym->m_src)))
+               rte_mempool_put(qp->fle_pool, (void *)(fle-1));
+       else
                rte_free((void *)(fle-1));
 }
 
@@ -206,7 +204,7 @@ build_proto_compound_sg_fd(dpaa2_sec_session *sess,
 static inline int
 build_proto_compound_fd(dpaa2_sec_session *sess,
               struct rte_crypto_op *op,
-              struct qbman_fd *fd, uint16_t bpid)
+              struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
 {
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct ctxt_priv *priv = sess->ctxt;
@@ -223,9 +221,9 @@ build_proto_compound_fd(dpaa2_sec_session *sess,
        flc = &priv->flc_desc[0].flc;
 
        /* we are using the first FLE entry to store Mbuf */
-       retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+       retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
        if (retval) {
-               DPAA2_SEC_DP_ERR("Memory alloc failed");
+               DPAA2_SEC_DP_DEBUG("Proto: Memory alloc failed");
                return -ENOMEM;
        }
        memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -282,11 +280,11 @@ build_proto_compound_fd(dpaa2_sec_session *sess,
 static inline int
 build_proto_fd(dpaa2_sec_session *sess,
               struct rte_crypto_op *op,
-              struct qbman_fd *fd, uint16_t bpid)
+              struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
 {
        struct rte_crypto_sym_op *sym_op = op->sym;
        if (sym_op->m_dst)
-               return build_proto_compound_fd(sess, op, fd, bpid);
+               return build_proto_compound_fd(sess, op, fd, bpid, qp);
 
        struct ctxt_priv *priv = sess->ctxt;
        struct sec_flow_context *flc;
@@ -461,7 +459,8 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
 static inline int
 build_authenc_gcm_fd(dpaa2_sec_session *sess,
                     struct rte_crypto_op *op,
-                    struct qbman_fd *fd, uint16_t bpid)
+                    struct qbman_fd *fd, uint16_t bpid,
+                    struct dpaa2_sec_qp *qp)
 {
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct ctxt_priv *priv = sess->ctxt;
@@ -485,9 +484,9 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
         * to get the MBUF Addr from the previous FLE.
         * We can have a better approach to use the inline Mbuf
         */
-       retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+       retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
        if (retval) {
-               DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
+               DPAA2_SEC_DP_DEBUG("GCM: no buffer available in fle pool");
                return -ENOMEM;
        }
        memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -748,7 +747,7 @@ build_authenc_sg_fd(dpaa2_sec_session *sess,
 static inline int
 build_authenc_fd(dpaa2_sec_session *sess,
                 struct rte_crypto_op *op,
-                struct qbman_fd *fd, uint16_t bpid)
+                struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
 {
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct ctxt_priv *priv = sess->ctxt;
@@ -777,9 +776,9 @@ build_authenc_fd(dpaa2_sec_session *sess,
         * to get the MBUF Addr from the previous FLE.
         * We can have a better approach to use the inline Mbuf
         */
-       retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+       retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
        if (retval) {
-               DPAA2_SEC_ERR("Memory alloc failed for SGE");
+               DPAA2_SEC_DP_DEBUG("AUTHENC: no buffer available in fle pool");
                return -ENOMEM;
        }
        memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -1010,7 +1009,7 @@ static inline int build_auth_sg_fd(
 
 static inline int
 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
-             struct qbman_fd *fd, uint16_t bpid)
+             struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
 {
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct qbman_fle *fle, *sge;
@@ -1034,9 +1033,9 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
                data_offset = data_offset >> 3;
        }
 
-       retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+       retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
        if (retval) {
-               DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
+               DPAA2_SEC_DP_DEBUG("AUTH: no buffer available in fle pool");
                return -ENOMEM;
        }
        memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -1257,7 +1256,7 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 
 static int
 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
-               struct qbman_fd *fd, uint16_t bpid)
+               struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
 {
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct qbman_fle *fle, *sge;
@@ -1287,9 +1286,9 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
        else
                dst = sym_op->m_src;
 
-       retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+       retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
        if (retval) {
-               DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
+               DPAA2_SEC_DP_DEBUG("CIPHER: no buffer available in fle pool");
                return -ENOMEM;
        }
        memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -1374,7 +1373,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 
 static inline int
 build_sec_fd(struct rte_crypto_op *op,
-            struct qbman_fd *fd, uint16_t bpid)
+            struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
 {
        int ret = -1;
        dpaa2_sec_session *sess;
@@ -1387,11 +1386,15 @@ build_sec_fd(struct rte_crypto_op *op,
                sess = (dpaa2_sec_session *)get_sec_session_private_data(
                                op->sym->sec_session);
 #endif
-       else
+       else {
+               DPAA2_SEC_DP_ERR("Session type invalid\n");
                return -ENOTSUP;
+       }
 
-       if (!sess)
+       if (!sess) {
+               DPAA2_SEC_DP_ERR("Session not available\n");
                return -EINVAL;
+       }
 
        /* Any of the buffer is segmented*/
        if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
@@ -1423,23 +1426,23 @@ build_sec_fd(struct rte_crypto_op *op,
        } else {
                switch (sess->ctxt_type) {
                case DPAA2_SEC_CIPHER:
-                       ret = build_cipher_fd(sess, op, fd, bpid);
+                       ret = build_cipher_fd(sess, op, fd, bpid, qp);
                        break;
                case DPAA2_SEC_AUTH:
-                       ret = build_auth_fd(sess, op, fd, bpid);
+                       ret = build_auth_fd(sess, op, fd, bpid, qp);
                        break;
                case DPAA2_SEC_AEAD:
-                       ret = build_authenc_gcm_fd(sess, op, fd, bpid);
+                       ret = build_authenc_gcm_fd(sess, op, fd, bpid, qp);
                        break;
                case DPAA2_SEC_CIPHER_HASH:
-                       ret = build_authenc_fd(sess, op, fd, bpid);
+                       ret = build_authenc_fd(sess, op, fd, bpid, qp);
                        break;
 #ifdef RTE_LIB_SECURITY
                case DPAA2_SEC_IPSEC:
-                       ret = build_proto_fd(sess, op, fd, bpid);
+                       ret = build_proto_fd(sess, op, fd, bpid, qp);
                        break;
                case DPAA2_SEC_PDCP:
-                       ret = build_proto_compound_fd(sess, op, fd, bpid);
+                       ret = build_proto_compound_fd(sess, op, fd, bpid, qp);
                        break;
 #endif
                case DPAA2_SEC_HASH_CIPHER:
@@ -1513,10 +1516,9 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
                        memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
                        mb_pool = (*ops)->sym->m_src->pool;
                        bpid = mempool_to_bpid(mb_pool);
-                       ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
+                       ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp);
                        if (ret) {
-                               DPAA2_SEC_ERR("error: Improper packet contents"
-                                             " for crypto operation");
+                               DPAA2_SEC_DP_DEBUG("FD build failed\n");
                                goto skip_tx;
                        }
                        ops++;
@@ -1537,7 +1539,8 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
                                        DPAA2_SEC_DP_DEBUG("Enqueue fail\n");
                                        /* freeing the fle buffers */
                                        while (loop < frames_to_send) {
-                                               free_fle(&fd_arr[loop]);
+                                               free_fle(&fd_arr[loop],
+                                                               dpaa2_qp);
                                                loop++;
                                        }
                                        goto skip_tx;
@@ -1566,6 +1569,10 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
        int16_t diff = 0;
        dpaa2_sec_session *sess_priv __rte_unused;
 
+       if (unlikely(DPAA2_GET_FD_IVP(fd))) {
+               DPAA2_SEC_ERR("error: non inline buffer");
+               return NULL;
+       }
        struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
                DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
@@ -1584,16 +1591,23 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
        else
                mbuf->data_off += SEC_FLC_DHR_INBOUND;
 
+       if (unlikely(fd->simple.frc)) {
+               DPAA2_SEC_ERR("SEC returned Error - %x",
+                               fd->simple.frc);
+               op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+       } else {
+               op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+       }
+
        return op;
 }
 #endif
 
 static inline struct rte_crypto_op *
-sec_fd_to_mbuf(const struct qbman_fd *fd)
+sec_fd_to_mbuf(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp)
 {
        struct qbman_fle *fle;
        struct rte_crypto_op *op;
-       struct ctxt_priv *priv;
        struct rte_mbuf *dst, *src;
 
 #ifdef RTE_LIB_SECURITY
@@ -1612,11 +1626,6 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
         * We can have a better approach to use the inline Mbuf
         */
 
-       if (unlikely(DPAA2_GET_FD_IVP(fd))) {
-               /* TODO complete it. */
-               DPAA2_SEC_ERR("error: non inline buffer");
-               return NULL;
-       }
        op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
 
        /* Prefeth op */
@@ -1652,8 +1661,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
 
        /* free the fle memory */
        if (likely(rte_pktmbuf_is_contiguous(src))) {
-               priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
-               rte_mempool_put(priv->fle_pool, (void *)(fle-1));
+               rte_mempool_put(qp->fle_pool, (void *)(fle-1));
        } else
                rte_free((void *)(fle-1));
 
@@ -1738,14 +1746,17 @@ mbuf_dump:
 }
 
 static void
-dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci)
+dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci,
+                         struct dpaa2_queue *dpaa2_q)
 {
        struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
        struct rte_crypto_op *op;
        struct qbman_fd *fd;
+       struct dpaa2_sec_qp *dpaa2_qp;
 
+       dpaa2_qp = container_of(dpaa2_q, struct dpaa2_sec_qp, tx_vq);
        fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
-       op = sec_fd_to_mbuf(fd);
+       op = sec_fd_to_mbuf(fd, dpaa2_qp);
        /* Instead of freeing, enqueue it to the sec tx queue (sec->core)
         * after setting an error in FD. But this will have performance impact.
         */
@@ -1861,10 +1872,9 @@ dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
                        memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
                        mb_pool = (*ops)->sym->m_src->pool;
                        bpid = mempool_to_bpid(mb_pool);
-                       ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
+                       ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp);
                        if (ret) {
-                               DPAA2_SEC_ERR("error: Improper packet contents"
-                                             " for crypto operation");
+                               DPAA2_SEC_DP_DEBUG("FD build failed\n");
                                goto skip_tx;
                        }
                        ops++;
@@ -1884,7 +1894,8 @@ dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
                                        DPAA2_SEC_DP_DEBUG("Enqueue fail\n");
                                        /* freeing the fle buffers */
                                        while (loop < frames_to_send) {
-                                               free_fle(&fd_arr[loop]);
+                                               free_fle(&fd_arr[loop],
+                                                               dpaa2_qp);
                                                loop++;
                                        }
                                        goto skip_tx;
@@ -1982,7 +1993,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
                }
 
                fd = qbman_result_DQ_fd(dq_storage);
-               ops[num_rx] = sec_fd_to_mbuf(fd);
+               ops[num_rx] = sec_fd_to_mbuf(fd, dpaa2_qp);
 
                if (unlikely(fd->simple.frc)) {
                        /* TODO Parse SEC errors */
@@ -2024,6 +2035,7 @@ dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
                dpaa2_free_dq_storage(qp->rx_vq.q_storage);
                rte_free(qp->rx_vq.q_storage);
        }
+       rte_mempool_free(qp->fle_pool);
        rte_free(qp);
 
        dev->data->queue_pairs[queue_pair_id] = NULL;
@@ -2034,7 +2046,7 @@ dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
 /** Setup a queue pair */
 static int
 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
-               __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
+               const struct rte_cryptodev_qp_conf *qp_conf,
                __rte_unused int socket_id)
 {
        struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
@@ -2042,6 +2054,7 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
        struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
        struct dpseci_rx_queue_cfg cfg;
        int32_t retcode;
+       char str[30];
 
        PMD_INIT_FUNC_TRACE();
 
@@ -2081,6 +2094,19 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 
        dev->data->queue_pairs[qp_id] = qp;
 
+       snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d_%d",
+                       getpid(), dev->data->dev_id, qp_id);
+       qp->fle_pool = rte_mempool_create((const char *)str,
+                       qp_conf->nb_descriptors,
+                       FLE_POOL_BUF_SIZE,
+                       FLE_POOL_CACHE_SIZE, 0,
+                       NULL, NULL, NULL, NULL,
+                       SOCKET_ID_ANY, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
+       if (!qp->fle_pool) {
+               DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
+               return -ENOMEM;
+       }
+
        cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
        cfg.user_ctx = (size_t)(&qp->rx_vq);
        retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
@@ -2098,11 +2124,9 @@ dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
 }
 
 static int
-dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
-                     struct rte_crypto_sym_xform *xform,
+dpaa2_sec_cipher_init(struct rte_crypto_sym_xform *xform,
                      dpaa2_sec_session *session)
 {
-       struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
        struct alginfo cipherdata;
        int bufsize, ret = 0;
        struct ctxt_priv *priv;
@@ -2119,8 +2143,6 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
                return -ENOMEM;
        }
 
-       priv->fle_pool = dev_priv->fle_pool;
-
        flc = &priv->flc_desc[0].flc;
 
        session->ctxt_type = DPAA2_SEC_CIPHER;
@@ -2239,11 +2261,9 @@ error_out:
 }
 
 static int
-dpaa2_sec_auth_init(struct rte_cryptodev *dev,
-                   struct rte_crypto_sym_xform *xform,
+dpaa2_sec_auth_init(struct rte_crypto_sym_xform *xform,
                    dpaa2_sec_session *session)
 {
-       struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
        struct alginfo authdata;
        int bufsize, ret = 0;
        struct ctxt_priv *priv;
@@ -2261,7 +2281,6 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
                return -ENOMEM;
        }
 
-       priv->fle_pool = dev_priv->fle_pool;
        flc = &priv->flc_desc[DESC_INITFINAL].flc;
 
        session->ctxt_type = DPAA2_SEC_AUTH;
@@ -2477,12 +2496,10 @@ error_out:
 }
 
 static int
-dpaa2_sec_aead_init(struct rte_cryptodev *dev,
-                   struct rte_crypto_sym_xform *xform,
+dpaa2_sec_aead_init(struct rte_crypto_sym_xform *xform,
                    dpaa2_sec_session *session)
 {
        struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
-       struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
        struct alginfo aeaddata;
        int bufsize;
        struct ctxt_priv *priv;
@@ -2506,7 +2523,6 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
                return -ENOMEM;
        }
 
-       priv->fle_pool = dev_priv->fle_pool;
        flc = &priv->flc_desc[0].flc;
 
        session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
@@ -2602,11 +2618,9 @@ error_out:
 
 
 static int
-dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
-                   struct rte_crypto_sym_xform *xform,
+dpaa2_sec_aead_chain_init(struct rte_crypto_sym_xform *xform,
                    dpaa2_sec_session *session)
 {
-       struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
        struct alginfo authdata, cipherdata;
        int bufsize;
        struct ctxt_priv *priv;
@@ -2644,7 +2658,6 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
                return -ENOMEM;
        }
 
-       priv->fle_pool = dev_priv->fle_pool;
        flc = &priv->flc_desc[0].flc;
 
        session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
@@ -2850,8 +2863,7 @@ error_out:
 }
 
 static int
-dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
-                           struct rte_crypto_sym_xform *xform, void *sess)
+dpaa2_sec_set_session_parameters(struct rte_crypto_sym_xform *xform, void *sess)
 {
        dpaa2_sec_session *session = sess;
        int ret;
@@ -2869,37 +2881,37 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
 
        /* Cipher Only */
        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
-               ret = dpaa2_sec_cipher_init(dev, xform, session);
+               ret = dpaa2_sec_cipher_init(xform, session);
 
        /* Authentication Only */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                   xform->next == NULL) {
-               ret = dpaa2_sec_auth_init(dev, xform, session);
+               ret = dpaa2_sec_auth_init(xform, session);
 
        /* Cipher then Authenticate */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                session->ext_params.aead_ctxt.auth_cipher_text = true;
                if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
-                       ret = dpaa2_sec_auth_init(dev, xform, session);
+                       ret = dpaa2_sec_auth_init(xform, session);
                else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
-                       ret = dpaa2_sec_cipher_init(dev, xform, session);
+                       ret = dpaa2_sec_cipher_init(xform, session);
                else
-                       ret = dpaa2_sec_aead_chain_init(dev, xform, session);
+                       ret = dpaa2_sec_aead_chain_init(xform, session);
        /* Authenticate then Cipher */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
                session->ext_params.aead_ctxt.auth_cipher_text = false;
                if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
-                       ret = dpaa2_sec_cipher_init(dev, xform, session);
+                       ret = dpaa2_sec_cipher_init(xform, session);
                else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
-                       ret = dpaa2_sec_auth_init(dev, xform, session);
+                       ret = dpaa2_sec_auth_init(xform, session);
                else
-                       ret = dpaa2_sec_aead_chain_init(dev, xform, session);
+                       ret = dpaa2_sec_aead_chain_init(xform, session);
        /* AEAD operation for AES-GCM kind of Algorithms */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
                   xform->next == NULL) {
-               ret = dpaa2_sec_aead_init(dev, xform, session);
+               ret = dpaa2_sec_aead_init(xform, session);
 
        } else {
                DPAA2_SEC_ERR("Invalid crypto type");
@@ -3148,7 +3160,6 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
        struct alginfo authdata, cipherdata;
        int bufsize;
        struct sec_flow_context *flc;
-       struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
        int ret = -1;
 
        PMD_INIT_FUNC_TRACE();
@@ -3163,7 +3174,6 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
                return -ENOMEM;
        }
 
-       priv->fle_pool = dev_priv->fle_pool;
        flc = &priv->flc_desc[0].flc;
 
        if (ipsec_xform->life.bytes_hard_limit != 0 ||
@@ -3396,7 +3406,6 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
        struct rte_crypto_cipher_xform *cipher_xform = NULL;
        dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
        struct ctxt_priv *priv;
-       struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
        struct alginfo authdata, cipherdata;
        struct alginfo *p_authdata = NULL;
        int bufsize = -1;
@@ -3421,7 +3430,6 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
                return -ENOMEM;
        }
 
-       priv->fle_pool = dev_priv->fle_pool;
        flc = &priv->flc_desc[0].flc;
 
        /* find xfrm types */
@@ -3759,7 +3767,7 @@ dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
                return -ENOMEM;
        }
 
-       ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
+       ret = dpaa2_sec_set_session_parameters(xform, sess_private_data);
        if (ret != 0) {
                DPAA2_SEC_ERR("Failed to configure session parameters");
                /* Return session to mempool */
@@ -3990,6 +3998,7 @@ dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
                                 struct dpaa2_queue *rxq,
                                 struct rte_event *ev)
 {
+       struct dpaa2_sec_qp *qp;
        /* Prefetching mbuf */
        rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
@@ -3997,6 +4006,7 @@ dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
        /* Prefetching ipsec crypto_op stored in priv data of mbuf */
        rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
 
+       qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
        ev->flow_id = rxq->ev.flow_id;
        ev->sub_event_type = rxq->ev.sub_event_type;
        ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
@@ -4004,7 +4014,7 @@ dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
        ev->sched_type = rxq->ev.sched_type;
        ev->queue_id = rxq->ev.queue_id;
        ev->priority = rxq->ev.priority;
-       ev->event_ptr = sec_fd_to_mbuf(fd);
+       ev->event_ptr = sec_fd_to_mbuf(fd, qp);
 
        qbman_swp_dqrr_consume(swp, dq);
 }
@@ -4016,7 +4026,8 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
                                 struct rte_event *ev)
 {
        uint8_t dqrr_index;
-       struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
+       struct dpaa2_sec_qp *qp;
+       struct rte_crypto_op *crypto_op;
        /* Prefetching mbuf */
        rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
@@ -4024,6 +4035,7 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
        /* Prefetching ipsec crypto_op stored in priv data of mbuf */
        rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
 
+       qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
        ev->flow_id = rxq->ev.flow_id;
        ev->sub_event_type = rxq->ev.sub_event_type;
        ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
@@ -4032,12 +4044,13 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
        ev->queue_id = rxq->ev.queue_id;
        ev->priority = rxq->ev.priority;
 
-       ev->event_ptr = sec_fd_to_mbuf(fd);
+       crypto_op = sec_fd_to_mbuf(fd, qp);
        dqrr_index = qbman_get_dqrr_idx(dq);
        *dpaa2_seqn(crypto_op->sym->m_src) = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
        DPAA2_PER_LCORE_DQRR_SIZE++;
        DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
        DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
+       ev->event_ptr = crypto_op;
 }
 
 static void __rte_hot
@@ -4047,7 +4060,8 @@ dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
                                struct dpaa2_queue *rxq,
                                struct rte_event *ev)
 {
-       struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
+       struct rte_crypto_op *crypto_op;
+       struct dpaa2_sec_qp *qp;
 
        /* Prefetching mbuf */
        rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
@@ -4056,6 +4070,7 @@ dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
        /* Prefetching ipsec crypto_op stored in priv data of mbuf */
        rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
 
+       qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
        ev->flow_id = rxq->ev.flow_id;
        ev->sub_event_type = rxq->ev.sub_event_type;
        ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
@@ -4063,7 +4078,7 @@ dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
        ev->sched_type = rxq->ev.sched_type;
        ev->queue_id = rxq->ev.queue_id;
        ev->priority = rxq->ev.priority;
-       ev->event_ptr = sec_fd_to_mbuf(fd);
+       crypto_op = sec_fd_to_mbuf(fd, qp);
 
        *dpaa2_seqn(crypto_op->sym->m_src) = DPAA2_ENQUEUE_FLAG_ORP;
        *dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_odpid(dq) <<
@@ -4072,6 +4087,7 @@ dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
                DPAA2_EQCR_SEQNUM_SHIFT;
 
        qbman_swp_dqrr_consume(swp, dq);
+       ev->event_ptr = crypto_op;
 }
 
 int
@@ -4237,7 +4253,6 @@ dpaa2_sec_uninit(const struct rte_cryptodev *dev)
        priv->hw = NULL;
        rte_free(dpseci);
        rte_free(dev->security_ctx);
-       rte_mempool_free(priv->fle_pool);
 
        DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
                       dev->data->name, rte_socket_id());
@@ -4305,7 +4320,6 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
        uint16_t token;
        struct dpseci_attr attr;
        int retcode, hw_id;
-       char str[30];
 
        PMD_INIT_FUNC_TRACE();
        dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
@@ -4381,19 +4395,6 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
        internals->token = token;
        internals->en_loose_ordered = true;
 
-       snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
-                       getpid(), cryptodev->data->dev_id);
-       internals->fle_pool = rte_mempool_create((const char *)str,
-                       FLE_POOL_NUM_BUFS,
-                       FLE_POOL_BUF_SIZE,
-                       FLE_POOL_CACHE_SIZE, 0,
-                       NULL, NULL, NULL, NULL,
-                       SOCKET_ID_ANY, 0);
-       if (!internals->fle_pool) {
-               DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
-               goto init_error;
-       }
-
        dpaa2_sec_get_devargs(cryptodev, DRIVER_DUMP_MODE);
        dpaa2_sec_get_devargs(cryptodev, DRIVER_STRICT_ORDER);
        DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);