crypto/dpaa_sec: fix null check in uninit
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index f2f9328..a260a11 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -84,7 +84,7 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
        dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
 
        ctx->ctx_pool = ses->ctx_pool;
-       ctx->vtop_offset = (uint64_t) ctx
+       ctx->vtop_offset = (size_t) ctx
                                - rte_mempool_virt2iova(ctx);
 
        return ctx;
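
The hunk above precomputes a per-object virtual-to-IOVA delta at allocation time so that the hot path can translate any address inside the context with a single subtraction instead of a memseg lookup; this is what dpaa_mem_vtop_ctx() below relies on. A minimal, self-contained sketch of the idea follows, with an assumed IOVA value standing in for what rte_mempool_virt2iova() would report:

/*
 * Minimal sketch of the precomputed-offset trick. 0x1000 is an assumed
 * IOVA for the demo object, not something the driver uses.
 */
#include <inttypes.h>
#include <stdio.h>

struct obj {
	uintptr_t vtop_offset;	/* virt - iova, filled at allocation time */
	char payload[64];
};

static uintptr_t obj_vtop(const struct obj *o, const void *field)
{
	/* any address inside the object translates with one subtraction */
	return (uintptr_t)field - o->vtop_offset;
}

int main(void)
{
	struct obj o;
	uintptr_t fake_iova = 0x1000;	/* assumed IOVA of &o, demo only */

	o.vtop_offset = (uintptr_t)&o - fake_iova;
	printf("payload IOVA = 0x%" PRIxPTR "\n", obj_vtop(&o, o.payload));
	return 0;
}
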
@@ -93,43 +93,25 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
 static inline rte_iova_t
 dpaa_mem_vtop(void *vaddr)
 {
-       const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
-       uint64_t vaddr_64, paddr;
-       int i;
-
-       vaddr_64 = (uint64_t)vaddr;
-       for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
-               if (vaddr_64 >= memseg[i].addr_64 &&
-                   vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
-                       paddr = memseg[i].iova +
-                               (vaddr_64 - memseg[i].addr_64);
-
-                       return (rte_iova_t)paddr;
-               }
-       }
-       return (rte_iova_t)(NULL);
+       const struct rte_memseg *ms;
+
+       ms = rte_mem_virt2memseg(vaddr, NULL);
+       if (ms)
+               return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
+       return (size_t)NULL;
 }
 
 /* virtual address conversion when mempool support is available for ctx */
 static inline phys_addr_t
 dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
 {
-       return (uint64_t)vaddr - ctx->vtop_offset;
+       return (size_t)vaddr - ctx->vtop_offset;
 }
 
 static inline void *
 dpaa_mem_ptov(rte_iova_t paddr)
 {
-       const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
-       int i;
-
-       for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
-               if (paddr >= memseg[i].iova &&
-                   (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
-                       return (void *)(memseg[i].addr_64 +
-                                       (paddr - memseg[i].iova));
-       }
-       return NULL;
+       return rte_mem_iova2virt(paddr);
 }
 
 static void
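
The open-coded walks over rte_eal_get_physmem_layout() are replaced above by the rte_mem_virt2memseg() and rte_mem_iova2virt() lookups. On a lookup miss the new dpaa_mem_vtop() still returns (size_t)NULL, i.e. 0; the sketch below shows the same lookup with an explicit sentinel instead, assuming RTE_BAD_IOVA is available in this DPDK revision (this is a reviewer-side variation, not part of the patch):

/*
 * Sketch only: same lookup as the new dpaa_mem_vtop(), but returning
 * RTE_BAD_IOVA on a miss rather than a NULL cast.
 */
#include <rte_common.h>
#include <rte_memory.h>

static inline rte_iova_t
vtop_or_bad(const void *vaddr)
{
	const struct rte_memseg *ms = rte_mem_virt2memseg(vaddr, NULL);

	if (ms == NULL)
		return RTE_BAD_IOVA;
	return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
}
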
@@ -406,7 +388,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
                        return -ENOTSUP;
                }
 
-               alginfo_c.key = (uint64_t)ses->cipher_key.data;
+               alginfo_c.key = (size_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;
@@ -424,7 +406,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
                        return -ENOTSUP;
                }
 
-               alginfo_a.key = (uint64_t)ses->auth_key.data;
+               alginfo_a.key = (size_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;
@@ -439,7 +421,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
                        PMD_TX_LOG(ERR, "not supported aead alg\n");
                        return -ENOTSUP;
                }
-               alginfo.key = (uint64_t)ses->aead_key.data;
+               alginfo.key = (size_t)ses->aead_key.data;
                alginfo.keylen = ses->aead_key.length;
                alginfo.key_enc_flags = 0;
                alginfo.key_type = RTA_DATA_IMM;
@@ -463,7 +445,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
                        return -ENOTSUP;
                }
 
-               alginfo_c.key = (uint64_t)ses->cipher_key.data;
+               alginfo_c.key = (size_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;
@@ -474,7 +456,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
                        return -ENOTSUP;
                }
 
-               alginfo_a.key = (uint64_t)ses->auth_key.data;
+               alginfo_a.key = (size_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;
@@ -493,15 +475,15 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
                if (cdb->sh_desc[2] & 1)
                        alginfo_c.key_type = RTA_DATA_IMM;
                else {
-                       alginfo_c.key = (uint64_t)dpaa_mem_vtop(
-                                                       (void *)alginfo_c.key);
+                       alginfo_c.key = (size_t)dpaa_mem_vtop(
+                                               (void *)(size_t)alginfo_c.key);
                        alginfo_c.key_type = RTA_DATA_PTR;
                }
                if (cdb->sh_desc[2] & (1<<1))
                        alginfo_a.key_type = RTA_DATA_IMM;
                else {
-                       alginfo_a.key = (uint64_t)dpaa_mem_vtop(
-                                                       (void *)alginfo_a.key);
+                       alginfo_a.key = (size_t)dpaa_mem_vtop(
+                                               (void *)(size_t)alginfo_a.key);
                        alginfo_a.key_type = RTA_DATA_PTR;
                }
                cdb->sh_desc[0] = 0;
@@ -537,46 +519,146 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
        return 0;
 }
 
-static inline unsigned int
-dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
+/* qp is lockless, should be accessed by only one thread */
+static int
+dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
 {
+       struct qman_fq *fq;
        unsigned int pkts = 0;
        int ret;
-       struct qm_mcr_queryfq_np np;
-       enum qman_fq_state state;
-       uint32_t flags;
-       uint32_t vdqcr;
-
-       qman_query_fq_np(fq, &np);
-       if (np.frm_cnt) {
-               vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
-               if (exact)
-                       vdqcr |= QM_VDQCR_EXACT;
-               ret = qman_volatile_dequeue(fq, 0, vdqcr);
-               if (ret)
-                       return 0;
-               do {
-                       pkts += qman_poll_dqrr(len);
-                       qman_fq_state(fq, &state, &flags);
-               } while (flags & QMAN_FQ_STATE_VDQCR);
-       }
+       struct qm_dqrr_entry *dq;
+
+       fq = &qp->outq;
+       ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
+                               DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
+       if (ret)
+               return 0;
+
+       do {
+               const struct qm_fd *fd;
+               struct dpaa_sec_job *job;
+               struct dpaa_sec_op_ctx *ctx;
+               struct rte_crypto_op *op;
+
+               dq = qman_dequeue(fq);
+               if (!dq)
+                       continue;
+
+               fd = &dq->fd;
+               /* sg is embedded in an op ctx,
+                * sg[0] is for output
+                * sg[1] for input
+                */
+               job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+               ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+               ctx->fd_status = fd->status;
+               op = ctx->op;
+               if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+                       struct qm_sg_entry *sg_out;
+                       uint32_t len;
+
+                       sg_out = &job->sg[0];
+                       hw_sg_to_cpu(sg_out);
+                       len = sg_out->length;
+                       op->sym->m_src->pkt_len = len;
+                       op->sym->m_src->data_len = len;
+               }
+               if (!ctx->fd_status) {
+                       op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+               } else {
+                       printf("\nSEC return err: 0x%x", ctx->fd_status);
+                       op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+               }
+               ops[pkts++] = op;
+
+               /* report op status to sym->op and then free the ctx memory */
+               rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+               qman_dqrr_consume(fq, dq);
+       } while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
        return pkts;
 }
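
dpaa_sec_deq() above recovers per-op state from the frame descriptor: the FD address converts back to the job embedded in the op context, and container_of() walks from that embedded member to the enclosing dpaa_sec_op_ctx. A self-contained illustration of that recovery step, using stand-in types and the usual offsetof-based container_of():

/* Illustration only: simplified stand-ins for dpaa_sec_job/dpaa_sec_op_ctx. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct job { int sg[4]; };
struct op_ctx {
	int fd_status;
	struct job job;	/* embedded, like dpaa_sec_job in dpaa_sec_op_ctx */
};

int main(void)
{
	struct op_ctx ctx = { .fd_status = 0 };
	struct job *j = &ctx.job;	/* what dpaa_mem_ptov() would hand back */
	struct op_ctx *back = container_of(j, struct op_ctx, job);

	printf("recovered ctx %p (orig %p)\n", (void *)back, (void *)&ctx);
	return 0;
}
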
 
-/* qp is lockless, should be accessed by only one thread */
-static int
-dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
+static inline struct dpaa_sec_job *
+build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
-       struct qman_fq *fq;
+       struct rte_crypto_sym_op *sym = op->sym;
+       struct rte_mbuf *mbuf = sym->m_src;
+       struct dpaa_sec_job *cf;
+       struct dpaa_sec_op_ctx *ctx;
+       struct qm_sg_entry *sg, *out_sg, *in_sg;
+       phys_addr_t start_addr;
+       uint8_t *old_digest, extra_segs;
 
-       fq = &qp->outq;
-       dpaa_sec_op_nb = 0;
-       dpaa_sec_ops = ops;
+       if (is_decode(ses))
+               extra_segs = 3;
+       else
+               extra_segs = 2;
+
+       if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
+               PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n",
+                                                               MAX_SG_ENTRIES);
+               return NULL;
+       }
+       ctx = dpaa_sec_alloc_ctx(ses);
+       if (!ctx)
+               return NULL;
+
+       cf = &ctx->job;
+       ctx->op = op;
+       old_digest = ctx->digest;
 
-       if (unlikely(nb_ops > DPAA_SEC_BURST))
-               nb_ops = DPAA_SEC_BURST;
+       /* output */
+       out_sg = &cf->sg[0];
+       qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
+       out_sg->length = ses->digest_length;
+       cpu_to_hw_sg(out_sg);
+
+       /* input */
+       in_sg = &cf->sg[1];
+       /* need to extend the input to a compound frame */
+       in_sg->extension = 1;
+       in_sg->final = 1;
+       in_sg->length = sym->auth.data.length;
+       qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+
+       /* 1st seg */
+       sg = in_sg + 1;
+       qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+       sg->length = mbuf->data_len - sym->auth.data.offset;
+       sg->offset = sym->auth.data.offset;
+
+       /* Successive segs */
+       mbuf = mbuf->next;
+       while (mbuf) {
+               cpu_to_hw_sg(sg);
+               sg++;
+               qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+               sg->length = mbuf->data_len;
+               mbuf = mbuf->next;
+       }
 
-       return dpaa_volatile_deq(fq, nb_ops, 1);
+       if (is_decode(ses)) {
+               /* Digest verification case */
+               cpu_to_hw_sg(sg);
+               sg++;
+               rte_memcpy(old_digest, sym->auth.digest.data,
+                               ses->digest_length);
+               start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
+               qm_sg_entry_set64(sg, start_addr);
+               sg->length = ses->digest_length;
+               in_sg->length += ses->digest_length;
+       } else {
+               /* Digest calculation case */
+               sg->length -= ses->digest_length;
+       }
+       sg->final = 1;
+       cpu_to_hw_sg(sg);
+       cpu_to_hw_sg(in_sg);
+
+       return cf;
 }
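
build_auth_only_sg() above, like the other *_sg builders added in this patch, walks the mbuf chain and emits one qm_sg_entry per segment, byte-swapping each finished entry for hardware and marking the last one final. A simplified, self-contained sketch of that walk (plain structs standing in for rte_mbuf and qm_sg_entry; cpu_to_hw_sg() reduced to a no-op):

#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t addr; uint32_t len; struct seg *next; };
struct sg_entry { uint64_t addr; uint32_t len; int final; };

static void cpu_to_hw_sg(struct sg_entry *e) { (void)e; }

/* Returns entries written, or 0 if the table would overflow
 * (mirrors the MAX_SG_ENTRIES check in the builders). */
static unsigned int fill_sg(struct sg_entry *tbl, unsigned int max,
			    const struct seg *m)
{
	unsigned int n = 0;

	for (; m != NULL; m = m->next) {
		if (n == max)
			return 0;
		tbl[n].addr = m->addr;	/* rte_pktmbuf_mtophys() in the driver */
		tbl[n].len = m->len;
		tbl[n].final = (m->next == NULL);
		cpu_to_hw_sg(&tbl[n]);
		n++;
	}
	return n;
}

int main(void)
{
	struct seg s1 = { 0x2000, 64, NULL };
	struct seg s0 = { 0x1000, 128, &s1 };
	struct sg_entry tbl[16];

	printf("wrote %u sg entries\n", fill_sg(tbl, 16, &s0));
	return 0;
}
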
 
 /**
@@ -648,6 +730,101 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
        return cf;
 }
 
+static inline struct dpaa_sec_job *
+build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+       struct rte_crypto_sym_op *sym = op->sym;
+       struct dpaa_sec_job *cf;
+       struct dpaa_sec_op_ctx *ctx;
+       struct qm_sg_entry *sg, *out_sg, *in_sg;
+       struct rte_mbuf *mbuf;
+       uint8_t req_segs;
+       uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+                       ses->iv.offset);
+
+       if (sym->m_dst) {
+               mbuf = sym->m_dst;
+               req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
+       } else {
+               mbuf = sym->m_src;
+               req_segs = mbuf->nb_segs * 2 + 3;
+       }
+
+       if (req_segs > MAX_SG_ENTRIES) {
+               PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n",
+                                                               MAX_SG_ENTRIES);
+               return NULL;
+       }
+
+       ctx = dpaa_sec_alloc_ctx(ses);
+       if (!ctx)
+               return NULL;
+
+       cf = &ctx->job;
+       ctx->op = op;
+
+       /* output */
+       out_sg = &cf->sg[0];
+       out_sg->extension = 1;
+       out_sg->length = sym->cipher.data.length;
+       qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+       cpu_to_hw_sg(out_sg);
+
+       /* 1st seg */
+       sg = &cf->sg[2];
+       qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+       sg->length = mbuf->data_len - sym->cipher.data.offset;
+       sg->offset = sym->cipher.data.offset;
+
+       /* Successive segs */
+       mbuf = mbuf->next;
+       while (mbuf) {
+               cpu_to_hw_sg(sg);
+               sg++;
+               qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+               sg->length = mbuf->data_len;
+               mbuf = mbuf->next;
+       }
+       sg->final = 1;
+       cpu_to_hw_sg(sg);
+
+       /* input */
+       mbuf = sym->m_src;
+       in_sg = &cf->sg[1];
+       in_sg->extension = 1;
+       in_sg->final = 1;
+       in_sg->length = sym->cipher.data.length + ses->iv.length;
+
+       sg++;
+       qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+       cpu_to_hw_sg(in_sg);
+
+       /* IV */
+       qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+       sg->length = ses->iv.length;
+       cpu_to_hw_sg(sg);
+
+       /* 1st seg */
+       sg++;
+       qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+       sg->length = mbuf->data_len - sym->cipher.data.offset;
+       sg->offset = sym->cipher.data.offset;
+
+       /* Successive segs */
+       mbuf = mbuf->next;
+       while (mbuf) {
+               cpu_to_hw_sg(sg);
+               sg++;
+               qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+               sg->length = mbuf->data_len;
+               mbuf = mbuf->next;
+       }
+       sg->final = 1;
+       cpu_to_hw_sg(sg);
+
+       return cf;
+}
+
 static inline struct dpaa_sec_job *
 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
@@ -703,6 +880,145 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
        return cf;
 }
 
+static inline struct dpaa_sec_job *
+build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+       struct rte_crypto_sym_op *sym = op->sym;
+       struct dpaa_sec_job *cf;
+       struct dpaa_sec_op_ctx *ctx;
+       struct qm_sg_entry *sg, *out_sg, *in_sg;
+       struct rte_mbuf *mbuf;
+       uint8_t req_segs;
+       uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+                       ses->iv.offset);
+
+       if (sym->m_dst) {
+               mbuf = sym->m_dst;
+               req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
+       } else {
+               mbuf = sym->m_src;
+               req_segs = mbuf->nb_segs * 2 + 4;
+       }
+
+       if (ses->auth_only_len)
+               req_segs++;
+
+       if (req_segs > MAX_SG_ENTRIES) {
+               PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n",
+                               MAX_SG_ENTRIES);
+               return NULL;
+       }
+
+       ctx = dpaa_sec_alloc_ctx(ses);
+       if (!ctx)
+               return NULL;
+
+       cf = &ctx->job;
+       ctx->op = op;
+
+       rte_prefetch0(cf->sg);
+
+       /* output */
+       out_sg = &cf->sg[0];
+       out_sg->extension = 1;
+       if (is_encode(ses))
+               out_sg->length = sym->aead.data.length + ses->auth_only_len
+                                               + ses->digest_length;
+       else
+               out_sg->length = sym->aead.data.length + ses->auth_only_len;
+
+       /* output sg entries */
+       sg = &cf->sg[2];
+       qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
+       cpu_to_hw_sg(out_sg);
+
+       /* 1st seg */
+       qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+       sg->length = mbuf->data_len - sym->aead.data.offset +
+                                       ses->auth_only_len;
+       sg->offset = sym->aead.data.offset - ses->auth_only_len;
+
+       /* Successive segs */
+       mbuf = mbuf->next;
+       while (mbuf) {
+               cpu_to_hw_sg(sg);
+               sg++;
+               qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+               sg->length = mbuf->data_len;
+               mbuf = mbuf->next;
+       }
+       sg->length -= ses->digest_length;
+
+       if (is_encode(ses)) {
+               cpu_to_hw_sg(sg);
+               /* set auth output */
+               sg++;
+               qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
+               sg->length = ses->digest_length;
+       }
+       sg->final = 1;
+       cpu_to_hw_sg(sg);
+
+       /* input */
+       mbuf = sym->m_src;
+       in_sg = &cf->sg[1];
+       in_sg->extension = 1;
+       in_sg->final = 1;
+       if (is_encode(ses))
+               in_sg->length = ses->iv.length + sym->aead.data.length
+                                                       + ses->auth_only_len;
+       else
+               in_sg->length = ses->iv.length + sym->aead.data.length
+                               + ses->auth_only_len + ses->digest_length;
+
+       /* input sg entries */
+       sg++;
+       qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+       cpu_to_hw_sg(in_sg);
+
+       /* 1st seg IV */
+       qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+       sg->length = ses->iv.length;
+       cpu_to_hw_sg(sg);
+
+       /* 2nd seg auth only */
+       if (ses->auth_only_len) {
+               sg++;
+               qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
+               sg->length = ses->auth_only_len;
+               cpu_to_hw_sg(sg);
+       }
+
+       /* 3rd seg */
+       sg++;
+       qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+       sg->length = mbuf->data_len - sym->aead.data.offset;
+       sg->offset = sym->aead.data.offset;
+
+       /* Successive segs */
+       mbuf = mbuf->next;
+       while (mbuf) {
+               cpu_to_hw_sg(sg);
+               sg++;
+               qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+               sg->length = mbuf->data_len;
+               mbuf = mbuf->next;
+       }
+
+       if (is_decode(ses)) {
+               cpu_to_hw_sg(sg);
+               sg++;
+               memcpy(ctx->digest, sym->aead.digest.data,
+                       ses->digest_length);
+               qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+               sg->length = ses->digest_length;
+       }
+       sg->final = 1;
+       cpu_to_hw_sg(sg);
+
+       return cf;
+}
+
 static inline struct dpaa_sec_job *
 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
@@ -815,6 +1131,132 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
        return cf;
 }
 
+static inline struct dpaa_sec_job *
+build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+       struct rte_crypto_sym_op *sym = op->sym;
+       struct dpaa_sec_job *cf;
+       struct dpaa_sec_op_ctx *ctx;
+       struct qm_sg_entry *sg, *out_sg, *in_sg;
+       struct rte_mbuf *mbuf;
+       uint8_t req_segs;
+       uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+                       ses->iv.offset);
+
+       if (sym->m_dst) {
+               mbuf = sym->m_dst;
+               req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
+       } else {
+               mbuf = sym->m_src;
+               req_segs = mbuf->nb_segs * 2 + 4;
+       }
+
+       if (req_segs > MAX_SG_ENTRIES) {
+               PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n",
+                               MAX_SG_ENTRIES);
+               return NULL;
+       }
+
+       ctx = dpaa_sec_alloc_ctx(ses);
+       if (!ctx)
+               return NULL;
+
+       cf = &ctx->job;
+       ctx->op = op;
+
+       rte_prefetch0(cf->sg);
+
+       /* output */
+       out_sg = &cf->sg[0];
+       out_sg->extension = 1;
+       if (is_encode(ses))
+               out_sg->length = sym->auth.data.length + ses->digest_length;
+       else
+               out_sg->length = sym->auth.data.length;
+
+       /* output sg entries */
+       sg = &cf->sg[2];
+       qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
+       cpu_to_hw_sg(out_sg);
+
+       /* 1st seg */
+       qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+       sg->length = mbuf->data_len - sym->auth.data.offset;
+       sg->offset = sym->auth.data.offset;
+
+       /* Successive segs */
+       mbuf = mbuf->next;
+       while (mbuf) {
+               cpu_to_hw_sg(sg);
+               sg++;
+               qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+               sg->length = mbuf->data_len;
+               mbuf = mbuf->next;
+       }
+       sg->length -= ses->digest_length;
+
+       if (is_encode(ses)) {
+               cpu_to_hw_sg(sg);
+               /* set auth output */
+               sg++;
+               qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
+               sg->length = ses->digest_length;
+       }
+       sg->final = 1;
+       cpu_to_hw_sg(sg);
+
+       /* input */
+       mbuf = sym->m_src;
+       in_sg = &cf->sg[1];
+       in_sg->extension = 1;
+       in_sg->final = 1;
+       if (is_encode(ses))
+               in_sg->length = ses->iv.length + sym->auth.data.length;
+       else
+               in_sg->length = ses->iv.length + sym->auth.data.length
+                                               + ses->digest_length;
+
+       /* input sg entries */
+       sg++;
+       qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+       cpu_to_hw_sg(in_sg);
+
+       /* 1st seg IV */
+       qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+       sg->length = ses->iv.length;
+       cpu_to_hw_sg(sg);
+
+       /* 2nd seg */
+       sg++;
+       qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+       sg->length = mbuf->data_len - sym->auth.data.offset;
+       sg->offset = sym->auth.data.offset;
+
+       /* Successive segs */
+       mbuf = mbuf->next;
+       while (mbuf) {
+               cpu_to_hw_sg(sg);
+               sg++;
+               qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+               sg->length = mbuf->data_len;
+               mbuf = mbuf->next;
+       }
+
+       sg->length -= ses->digest_length;
+       if (is_decode(ses)) {
+               cpu_to_hw_sg(sg);
+               sg++;
+               memcpy(ctx->digest, sym->auth.digest.data,
+                       ses->digest_length);
+               qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+               sg->length = ses->digest_length;
+       }
+       sg->final = 1;
+       cpu_to_hw_sg(sg);
+
+       return cf;
+}
+
 static inline struct dpaa_sec_job *
 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
@@ -949,95 +1391,126 @@ build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
        return cf;
 }
 
-static int
-dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
-{
-       struct dpaa_sec_job *cf;
-       dpaa_sec_session *ses;
-       struct qm_fd fd;
-       int ret;
-       uint32_t auth_only_len = op->sym->auth.data.length -
-                               op->sym->cipher.data.length;
-
-       if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-               ses = (dpaa_sec_session *)get_session_private_data(
-                               op->sym->session, cryptodev_driver_id);
-       else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
-               ses = (dpaa_sec_session *)get_sec_session_private_data(
-                               op->sym->sec_session);
-       else
-               return -ENOTSUP;
-
-       if (unlikely(!ses->qp || ses->qp != qp)) {
-               PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", ses->qp, qp);
-               if (dpaa_sec_attach_sess_q(qp, ses))
-                       return -1;
-       }
-
-       /*
-        * Segmented buffer is not supported.
-        */
-       if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
-               op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-               return -ENOTSUP;
-       }
-       if (is_auth_only(ses)) {
-               cf = build_auth_only(op, ses);
-       } else if (is_cipher_only(ses)) {
-               cf = build_cipher_only(op, ses);
-       } else if (is_aead(ses)) {
-               cf = build_cipher_auth_gcm(op, ses);
-               auth_only_len = ses->auth_only_len;
-       } else if (is_auth_cipher(ses)) {
-               cf = build_cipher_auth(op, ses);
-       } else if (is_proto_ipsec(ses)) {
-               cf = build_proto(op, ses);
-       } else {
-               PMD_TX_LOG(ERR, "not supported sec op");
-               return -ENOTSUP;
-       }
-       if (unlikely(!cf))
-               return -ENOMEM;
-
-       memset(&fd, 0, sizeof(struct qm_fd));
-       qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg));
-       fd._format1 = qm_fd_compound;
-       fd.length29 = 2 * sizeof(struct qm_sg_entry);
-       /* Auth_only_len is set as 0 in descriptor and it is overwritten
-        * here in the fd.cmd which will update the DPOVRD reg.
-        */
-       if (auth_only_len)
-               fd.cmd = 0x80000000 | auth_only_len;
-       do {
-               ret = qman_enqueue(ses->inq, &fd, 0);
-       } while (ret != 0);
-
-       return 0;
-}
-
 static uint16_t
 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
                       uint16_t nb_ops)
 {
        /* Function to transmit the frames to given device and queuepair */
        uint32_t loop;
-       int32_t ret;
        struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
        uint16_t num_tx = 0;
+       struct qm_fd fds[DPAA_SEC_BURST], *fd;
+       uint32_t frames_to_send;
+       struct rte_crypto_op *op;
+       struct dpaa_sec_job *cf;
+       dpaa_sec_session *ses;
+       struct dpaa_sec_op_ctx *ctx;
+       uint32_t auth_only_len;
+       struct qman_fq *inq[DPAA_SEC_BURST];
+
+       while (nb_ops) {
+               frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
+                               DPAA_SEC_BURST : nb_ops;
+               for (loop = 0; loop < frames_to_send; loop++) {
+                       op = *(ops++);
+                       switch (op->sess_type) {
+                       case RTE_CRYPTO_OP_WITH_SESSION:
+                               ses = (dpaa_sec_session *)
+                                       get_session_private_data(
+                                                       op->sym->session,
+                                                       cryptodev_driver_id);
+                               break;
+                       case RTE_CRYPTO_OP_SECURITY_SESSION:
+                               ses = (dpaa_sec_session *)
+                                       get_sec_session_private_data(
+                                                       op->sym->sec_session);
+                               break;
+                       default:
+                               PMD_TX_LOG(ERR,
+                                       "sessionless crypto op not supported");
+                               frames_to_send = loop;
+                               nb_ops = loop;
+                               goto send_pkts;
+                       }
+                       if (unlikely(!ses->qp || ses->qp != qp)) {
+                               PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
+                                               ses->qp, qp);
+                               if (dpaa_sec_attach_sess_q(qp, ses)) {
+                                       frames_to_send = loop;
+                                       nb_ops = loop;
+                                       goto send_pkts;
+                               }
+                       }
 
-       if (unlikely(nb_ops == 0))
-               return 0;
+                       auth_only_len = op->sym->auth.data.length -
+                                               op->sym->cipher.data.length;
+                       if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+                               if (is_auth_only(ses)) {
+                                       cf = build_auth_only(op, ses);
+                               } else if (is_cipher_only(ses)) {
+                                       cf = build_cipher_only(op, ses);
+                               } else if (is_aead(ses)) {
+                                       cf = build_cipher_auth_gcm(op, ses);
+                                       auth_only_len = ses->auth_only_len;
+                               } else if (is_auth_cipher(ses)) {
+                                       cf = build_cipher_auth(op, ses);
+                               } else if (is_proto_ipsec(ses)) {
+                                       cf = build_proto(op, ses);
+                               } else {
+                                       PMD_TX_LOG(ERR, "not supported sec op");
+                                       frames_to_send = loop;
+                                       nb_ops = loop;
+                                       goto send_pkts;
+                               }
+                       } else {
+                               if (is_auth_only(ses)) {
+                                       cf = build_auth_only_sg(op, ses);
+                               } else if (is_cipher_only(ses)) {
+                                       cf = build_cipher_only_sg(op, ses);
+                               } else if (is_aead(ses)) {
+                                       cf = build_cipher_auth_gcm_sg(op, ses);
+                                       auth_only_len = ses->auth_only_len;
+                               } else if (is_auth_cipher(ses)) {
+                                       cf = build_cipher_auth_sg(op, ses);
+                               } else {
+                                       PMD_TX_LOG(ERR, "not supported sec op");
+                                       frames_to_send = loop;
+                                       nb_ops = loop;
+                                       goto send_pkts;
+                               }
+                       }
+                       if (unlikely(!cf)) {
+                               frames_to_send = loop;
+                               nb_ops = loop;
+                               goto send_pkts;
+                       }
+
+                       fd = &fds[loop];
+                       inq[loop] = ses->inq;
+                       fd->opaque_addr = 0;
+                       fd->cmd = 0;
+                       ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
+                       qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
+                       fd->_format1 = qm_fd_compound;
+                       fd->length29 = 2 * sizeof(struct qm_sg_entry);
+                       /* Auth_only_len is set as 0 in descriptor and it is
+                        * overwritten here in the fd.cmd which will update
+                        * the DPOVRD reg.
+                        */
+                       if (auth_only_len)
+                               fd->cmd = 0x80000000 | auth_only_len;
 
-       /*Prepare each packet which is to be sent*/
-       for (loop = 0; loop < nb_ops; loop++) {
-               if (ops[loop]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-                       PMD_TX_LOG(ERR, "sessionless crypto op not supported");
-                       return 0;
                }
-               ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp);
-               if (!ret)
-                       num_tx++;
+send_pkts:
+               loop = 0;
+               while (loop < frames_to_send) {
+                       loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
+                                       frames_to_send - loop);
+               }
+               nb_ops -= frames_to_send;
+               num_tx += frames_to_send;
        }
+
        dpaa_qp->tx_pkts += num_tx;
        dpaa_qp->tx_errs += nb_ops - num_tx;
 
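
The reworked enqueue path batches up to DPAA_SEC_BURST frame descriptors per chunk and keeps re-issuing qman_enqueue_multi_fq() until every prepared frame has been accepted, since (as the loop above implies) a single call may take fewer frames than offered. A self-contained sketch of that chunk-and-retry pattern, with a generic stub in place of qman_enqueue_multi_fq():

#include <stdint.h>
#include <stdio.h>

#define BURST 32	/* plays the role of DPAA_SEC_BURST */

/* Pretend hardware that accepts at most 5 items per call. */
static unsigned int hw_enqueue(const int *items, unsigned int n)
{
	(void)items;
	return n > 5 ? 5 : n;
}

static unsigned int send_all(const int *items, unsigned int nb)
{
	unsigned int sent = 0;

	while (nb) {
		unsigned int chunk = nb > BURST ? BURST : nb;
		unsigned int done = 0;

		/* keep calling until the whole prepared chunk is accepted */
		while (done < chunk)
			done += hw_enqueue(&items[sent + done], chunk - done);

		sent += chunk;
		nb -= chunk;
	}
	return sent;
}

int main(void)
{
	int frames[100] = {0};

	printf("sent %u frames\n", send_all(frames, 100));
	return 0;
}
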
@@ -1268,7 +1741,13 @@ dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
                PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
                return -1;
        }
-
+       if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+               ret = rte_dpaa_portal_init((void *)0);
+               if (ret) {
+                       PMD_DRV_LOG(ERR, "Failure in affining portal");
+                       return ret;
+               }
+       }
        ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
                               qman_fq_fqid(&qp->outq));
        if (ret)
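
Session attach may now run on a thread whose portal has not been affined yet, so the patch lazily calls rte_dpaa_portal_init() when the per-lcore dpaa_io flag is unset. A hedged sketch of the general per-lcore lazy-init pattern follows; the names are illustrative only, since the real dpaa_io flag and rte_dpaa_portal_init() live in the DPAA bus driver, not in this file:

#include <rte_branch_prediction.h>
#include <rte_per_lcore.h>

RTE_DEFINE_PER_LCORE(int, demo_portal_ready);

static int demo_portal_init(void)
{
	/* ...affine the hardware portal to the calling thread... */
	RTE_PER_LCORE(demo_portal_ready) = 1;
	return 0;
}

static inline int demo_ensure_portal(void)
{
	if (unlikely(!RTE_PER_LCORE(demo_portal_ready)))
		return demo_portal_init();
	return 0;
}
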
@@ -1755,11 +2234,12 @@ struct rte_security_ops dpaa_sec_security_ops = {
 static int
 dpaa_sec_uninit(struct rte_cryptodev *dev)
 {
-       struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+       struct dpaa_sec_dev_private *internals;
 
        if (dev == NULL)
                return -ENODEV;
 
+       internals = dev->data->dev_private;
        rte_free(dev->security_ctx);
 
        rte_mempool_free(internals->ctx_pool);
@@ -1791,7 +2271,8 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
        cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
                        RTE_CRYPTODEV_FF_HW_ACCELERATED |
                        RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-                       RTE_CRYPTODEV_FF_SECURITY;
+                       RTE_CRYPTODEV_FF_SECURITY |
+                       RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
 
        internals = cryptodev->data->dev_private;
        internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
@@ -1953,5 +2434,5 @@ static struct rte_dpaa_driver rte_dpaa_sec_driver = {
 static struct cryptodev_driver dpaa_sec_crypto_drv;
 
 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
-RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
                cryptodev_driver_id);