diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 66828b5d0f..b685220ad9 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -84,7 +84,7 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
 	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
 
 	ctx->ctx_pool = ses->ctx_pool;
-	ctx->vtop_offset = (uint64_t) ctx
+	ctx->vtop_offset = (size_t) ctx
 			- rte_mempool_virt2iova(ctx);
 
 	return ctx;
@@ -93,43 +93,25 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
 static inline rte_iova_t
 dpaa_mem_vtop(void *vaddr)
 {
-	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
-	uint64_t vaddr_64, paddr;
-	int i;
-
-	vaddr_64 = (uint64_t)vaddr;
-	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
-		if (vaddr_64 >= memseg[i].addr_64 &&
-		    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
-			paddr = memseg[i].iova +
-				(vaddr_64 - memseg[i].addr_64);
-
-			return (rte_iova_t)paddr;
-		}
-	}
-	return (rte_iova_t)(NULL);
+	const struct rte_memseg *ms;
+
+	ms = rte_mem_virt2memseg(vaddr, NULL);
+	if (ms)
+		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
+	return (size_t)NULL;
 }
 
 /* virtual address conversion when mempool support is available for ctx */
 static inline phys_addr_t
 dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
 {
-	return (uint64_t)vaddr - ctx->vtop_offset;
+	return (size_t)vaddr - ctx->vtop_offset;
 }
 
 static inline void *
 dpaa_mem_ptov(rte_iova_t paddr)
 {
-	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
-	int i;
-
-	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
-		if (paddr >= memseg[i].iova &&
-		    (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
-			return (void *)(memseg[i].addr_64 +
-					(paddr - memseg[i].iova));
-	}
-	return NULL;
+	return rte_mem_iova2virt(paddr);
 }
 
 static void
@@ -406,7 +388,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 			return -ENOTSUP;
 		}
 
-		alginfo_c.key = (uint64_t)ses->cipher_key.data;
+		alginfo_c.key = (size_t)ses->cipher_key.data;
 		alginfo_c.keylen = ses->cipher_key.length;
 		alginfo_c.key_enc_flags = 0;
 		alginfo_c.key_type = RTA_DATA_IMM;
@@ -424,7 +406,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 			return -ENOTSUP;
 		}
 
-		alginfo_a.key = (uint64_t)ses->auth_key.data;
+		alginfo_a.key = (size_t)ses->auth_key.data;
 		alginfo_a.keylen = ses->auth_key.length;
 		alginfo_a.key_enc_flags = 0;
 		alginfo_a.key_type = RTA_DATA_IMM;
@@ -439,7 +421,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 			PMD_TX_LOG(ERR, "not supported aead alg\n");
 			return -ENOTSUP;
 		}
-		alginfo.key = (uint64_t)ses->aead_key.data;
+		alginfo.key = (size_t)ses->aead_key.data;
 		alginfo.keylen = ses->aead_key.length;
 		alginfo.key_enc_flags = 0;
 		alginfo.key_type = RTA_DATA_IMM;
@@ -463,7 +445,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 				return -ENOTSUP;
 			}
 
-			alginfo_c.key = (uint64_t)ses->cipher_key.data;
+			alginfo_c.key = (size_t)ses->cipher_key.data;
 			alginfo_c.keylen = ses->cipher_key.length;
 			alginfo_c.key_enc_flags = 0;
 			alginfo_c.key_type = RTA_DATA_IMM;
@@ -474,7 +456,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 				return -ENOTSUP;
 			}
 
-			alginfo_a.key = (uint64_t)ses->auth_key.data;
+			alginfo_a.key = (size_t)ses->auth_key.data;
 			alginfo_a.keylen = ses->auth_key.length;
 			alginfo_a.key_enc_flags = 0;
 			alginfo_a.key_type = RTA_DATA_IMM;
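
Note on the conversion rework above: dpaa_mem_vtop() now resolves the enclosing memseg through rte_mem_virt2memseg() instead of scanning the legacy rte_eal_get_physmem_layout() table, and each per-operation context caches a vtop_offset at allocation time so the hot-path translation in dpaa_mem_vtop_ctx() is a single subtraction. A minimal sketch of that offset trick, assuming a hypothetical context type carved from one IOVA-contiguous mempool element (the struct name and fields are illustrative, not the driver's layout):

    #include <stdint.h>

    struct ctx {
    	uintptr_t vtop_offset;	/* virt(ctx) - iova(ctx), cached at alloc time */
    	uint8_t digest[64];	/* any member inside the same element */
    };

    /* Translate an address inside the element without a memseg lookup;
     * valid only because the mempool element is IOVA-contiguous.
     */
    static inline uint64_t
    ctx_vtop(const struct ctx *c, const void *vaddr)
    {
    	return (uint64_t)(uintptr_t)vaddr - c->vtop_offset;
    }

This mirrors the dpaa_mem_vtop_ctx() fast path shown above.
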
@@ -493,15 +475,15 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 		if (cdb->sh_desc[2] & 1)
 			alginfo_c.key_type = RTA_DATA_IMM;
 		else {
-			alginfo_c.key = (uint64_t)dpaa_mem_vtop(
-						(void *)alginfo_c.key);
+			alginfo_c.key = (size_t)dpaa_mem_vtop(
+						(void *)(size_t)alginfo_c.key);
 			alginfo_c.key_type = RTA_DATA_PTR;
 		}
 		if (cdb->sh_desc[2] & (1<<1))
 			alginfo_a.key_type = RTA_DATA_IMM;
 		else {
-			alginfo_a.key = (uint64_t)dpaa_mem_vtop(
-						(void *)alginfo_a.key);
+			alginfo_a.key = (size_t)dpaa_mem_vtop(
+						(void *)(size_t)alginfo_a.key);
 			alginfo_a.key_type = RTA_DATA_PTR;
 		}
 		cdb->sh_desc[0] = 0;
@@ -537,46 +519,146 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 	return 0;
 }
 
-static inline unsigned int
-dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
+/* qp is lockless, should be accessed by only one thread */
+static int
+dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
 {
+	struct qman_fq *fq;
 	unsigned int pkts = 0;
 	int ret;
-	struct qm_mcr_queryfq_np np;
-	enum qman_fq_state state;
-	uint32_t flags;
-	uint32_t vdqcr;
-
-	qman_query_fq_np(fq, &np);
-	if (np.frm_cnt) {
-		vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
-		if (exact)
-			vdqcr |= QM_VDQCR_EXACT;
-		ret = qman_volatile_dequeue(fq, 0, vdqcr);
-		if (ret)
-			return 0;
-		do {
-			pkts += qman_poll_dqrr(len);
-			qman_fq_state(fq, &state, &flags);
-		} while (flags & QMAN_FQ_STATE_VDQCR);
-	}
+	struct qm_dqrr_entry *dq;
+
+	fq = &qp->outq;
+	ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
+				DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
+	if (ret)
+		return 0;
+
+	do {
+		const struct qm_fd *fd;
+		struct dpaa_sec_job *job;
+		struct dpaa_sec_op_ctx *ctx;
+		struct rte_crypto_op *op;
+
+		dq = qman_dequeue(fq);
+		if (!dq)
+			continue;
+
+		fd = &dq->fd;
+		/* sg is embedded in an op ctx,
+		 * sg[0] is for output
+		 * sg[1] for input
+		 */
+		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+		ctx->fd_status = fd->status;
+		op = ctx->op;
+		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+			struct qm_sg_entry *sg_out;
+			uint32_t len;
+
+			sg_out = &job->sg[0];
+			hw_sg_to_cpu(sg_out);
+			len = sg_out->length;
+			op->sym->m_src->pkt_len = len;
+			op->sym->m_src->data_len = len;
+		}
+		if (!ctx->fd_status) {
+			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		} else {
+			printf("\nSEC return err: 0x%x", ctx->fd_status);
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		}
+		ops[pkts++] = op;
+
+		/* report op status to sym->op and then free the ctx memory */
+		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+		qman_dqrr_consume(fq, dq);
+	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
 	return pkts;
 }
 
-/* qp is lockless, should be accessed by only one thread */
-static int
-dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
+static inline struct dpaa_sec_job *
+build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
-	struct qman_fq *fq;
+	struct rte_crypto_sym_op *sym = op->sym;
+	struct rte_mbuf *mbuf = sym->m_src;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	phys_addr_t start_addr;
+	uint8_t *old_digest, extra_segs;
 
-	fq = &qp->outq;
-	dpaa_sec_op_nb = 0;
-	dpaa_sec_ops = ops;
+	if (is_decode(ses))
+		extra_segs = 3;
+	else
+		extra_segs = 2;
+
+	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
+		PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n",
+			   MAX_SG_ENTRIES);
+		return NULL;
+	}
+	ctx = dpaa_sec_alloc_ctx(ses);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->op = op;
+	old_digest = 
ctx->digest; + + /* output */ + out_sg = &cf->sg[0]; + qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr); + out_sg->length = ses->digest_length; + cpu_to_hw_sg(out_sg); - if (unlikely(nb_ops > DPAA_SEC_BURST)) - nb_ops = DPAA_SEC_BURST; + /* input */ + in_sg = &cf->sg[1]; + /* need to extend the input to a compound frame */ + in_sg->extension = 1; + in_sg->final = 1; + in_sg->length = sym->auth.data.length; + qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2])); + + /* 1st seg */ + sg = in_sg + 1; + qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); + sg->length = mbuf->data_len - sym->auth.data.offset; + sg->offset = sym->auth.data.offset; + + /* Successive segs */ + mbuf = mbuf->next; + while (mbuf) { + cpu_to_hw_sg(sg); + sg++; + qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); + sg->length = mbuf->data_len; + mbuf = mbuf->next; + } + + if (is_decode(ses)) { + /* Digest verification case */ + cpu_to_hw_sg(sg); + sg++; + rte_memcpy(old_digest, sym->auth.digest.data, + ses->digest_length); + start_addr = dpaa_mem_vtop_ctx(ctx, old_digest); + qm_sg_entry_set64(sg, start_addr); + sg->length = ses->digest_length; + in_sg->length += ses->digest_length; + } else { + /* Digest calculation case */ + sg->length -= ses->digest_length; + } + sg->final = 1; + cpu_to_hw_sg(sg); + cpu_to_hw_sg(in_sg); - return dpaa_volatile_deq(fq, nb_ops, 1); + return cf; } /** @@ -648,6 +730,101 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses) return cf; } +static inline struct dpaa_sec_job * +build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) +{ + struct rte_crypto_sym_op *sym = op->sym; + struct dpaa_sec_job *cf; + struct dpaa_sec_op_ctx *ctx; + struct qm_sg_entry *sg, *out_sg, *in_sg; + struct rte_mbuf *mbuf; + uint8_t req_segs; + uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, + ses->iv.offset); + + if (sym->m_dst) { + mbuf = sym->m_dst; + req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3; + } else { + mbuf = sym->m_src; + req_segs = mbuf->nb_segs * 2 + 3; + } + + if (req_segs > MAX_SG_ENTRIES) { + PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n", + MAX_SG_ENTRIES); + return NULL; + } + + ctx = dpaa_sec_alloc_ctx(ses); + if (!ctx) + return NULL; + + cf = &ctx->job; + ctx->op = op; + + /* output */ + out_sg = &cf->sg[0]; + out_sg->extension = 1; + out_sg->length = sym->cipher.data.length; + qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2])); + cpu_to_hw_sg(out_sg); + + /* 1st seg */ + sg = &cf->sg[2]; + qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); + sg->length = mbuf->data_len - sym->cipher.data.offset; + sg->offset = sym->cipher.data.offset; + + /* Successive segs */ + mbuf = mbuf->next; + while (mbuf) { + cpu_to_hw_sg(sg); + sg++; + qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); + sg->length = mbuf->data_len; + mbuf = mbuf->next; + } + sg->final = 1; + cpu_to_hw_sg(sg); + + /* input */ + mbuf = sym->m_src; + in_sg = &cf->sg[1]; + in_sg->extension = 1; + in_sg->final = 1; + in_sg->length = sym->cipher.data.length + ses->iv.length; + + sg++; + qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg)); + cpu_to_hw_sg(in_sg); + + /* IV */ + qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr)); + sg->length = ses->iv.length; + cpu_to_hw_sg(sg); + + /* 1st seg */ + sg++; + qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); + sg->length = mbuf->data_len - sym->cipher.data.offset; + sg->offset = sym->cipher.data.offset; + + /* Successive segs */ + mbuf = mbuf->next; + while (mbuf) { + cpu_to_hw_sg(sg); + sg++; + qm_sg_entry_set64(sg, 
rte_pktmbuf_mtophys(mbuf)); + sg->length = mbuf->data_len; + mbuf = mbuf->next; + } + sg->final = 1; + cpu_to_hw_sg(sg); + + return cf; +} + static inline struct dpaa_sec_job * build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses) { @@ -703,6 +880,145 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses) return cf; } +static inline struct dpaa_sec_job * +build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) +{ + struct rte_crypto_sym_op *sym = op->sym; + struct dpaa_sec_job *cf; + struct dpaa_sec_op_ctx *ctx; + struct qm_sg_entry *sg, *out_sg, *in_sg; + struct rte_mbuf *mbuf; + uint8_t req_segs; + uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, + ses->iv.offset); + + if (sym->m_dst) { + mbuf = sym->m_dst; + req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4; + } else { + mbuf = sym->m_src; + req_segs = mbuf->nb_segs * 2 + 4; + } + + if (ses->auth_only_len) + req_segs++; + + if (req_segs > MAX_SG_ENTRIES) { + PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n", + MAX_SG_ENTRIES); + return NULL; + } + + ctx = dpaa_sec_alloc_ctx(ses); + if (!ctx) + return NULL; + + cf = &ctx->job; + ctx->op = op; + + rte_prefetch0(cf->sg); + + /* output */ + out_sg = &cf->sg[0]; + out_sg->extension = 1; + if (is_encode(ses)) + out_sg->length = sym->aead.data.length + ses->auth_only_len + + ses->digest_length; + else + out_sg->length = sym->aead.data.length + ses->auth_only_len; + + /* output sg entries */ + sg = &cf->sg[2]; + qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg)); + cpu_to_hw_sg(out_sg); + + /* 1st seg */ + qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); + sg->length = mbuf->data_len - sym->aead.data.offset + + ses->auth_only_len; + sg->offset = sym->aead.data.offset - ses->auth_only_len; + + /* Successive segs */ + mbuf = mbuf->next; + while (mbuf) { + cpu_to_hw_sg(sg); + sg++; + qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); + sg->length = mbuf->data_len; + mbuf = mbuf->next; + } + sg->length -= ses->digest_length; + + if (is_encode(ses)) { + cpu_to_hw_sg(sg); + /* set auth output */ + sg++; + qm_sg_entry_set64(sg, sym->aead.digest.phys_addr); + sg->length = ses->digest_length; + } + sg->final = 1; + cpu_to_hw_sg(sg); + + /* input */ + mbuf = sym->m_src; + in_sg = &cf->sg[1]; + in_sg->extension = 1; + in_sg->final = 1; + if (is_encode(ses)) + in_sg->length = ses->iv.length + sym->aead.data.length + + ses->auth_only_len; + else + in_sg->length = ses->iv.length + sym->aead.data.length + + ses->auth_only_len + ses->digest_length; + + /* input sg entries */ + sg++; + qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg)); + cpu_to_hw_sg(in_sg); + + /* 1st seg IV */ + qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr)); + sg->length = ses->iv.length; + cpu_to_hw_sg(sg); + + /* 2nd seg auth only */ + if (ses->auth_only_len) { + sg++; + qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data)); + sg->length = ses->auth_only_len; + cpu_to_hw_sg(sg); + } + + /* 3rd seg */ + sg++; + qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); + sg->length = mbuf->data_len - sym->aead.data.offset; + sg->offset = sym->aead.data.offset; + + /* Successive segs */ + mbuf = mbuf->next; + while (mbuf) { + cpu_to_hw_sg(sg); + sg++; + qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); + sg->length = mbuf->data_len; + mbuf = mbuf->next; + } + + if (is_decode(ses)) { + cpu_to_hw_sg(sg); + sg++; + memcpy(ctx->digest, sym->aead.digest.data, + ses->digest_length); + qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest)); + sg->length = ses->digest_length; + } + 
sg->final = 1; + cpu_to_hw_sg(sg); + + return cf; +} + static inline struct dpaa_sec_job * build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses) { @@ -815,6 +1131,132 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses) return cf; } +static inline struct dpaa_sec_job * +build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) +{ + struct rte_crypto_sym_op *sym = op->sym; + struct dpaa_sec_job *cf; + struct dpaa_sec_op_ctx *ctx; + struct qm_sg_entry *sg, *out_sg, *in_sg; + struct rte_mbuf *mbuf; + uint8_t req_segs; + uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, + ses->iv.offset); + + if (sym->m_dst) { + mbuf = sym->m_dst; + req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4; + } else { + mbuf = sym->m_src; + req_segs = mbuf->nb_segs * 2 + 4; + } + + if (req_segs > MAX_SG_ENTRIES) { + PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n", + MAX_SG_ENTRIES); + return NULL; + } + + ctx = dpaa_sec_alloc_ctx(ses); + if (!ctx) + return NULL; + + cf = &ctx->job; + ctx->op = op; + + rte_prefetch0(cf->sg); + + /* output */ + out_sg = &cf->sg[0]; + out_sg->extension = 1; + if (is_encode(ses)) + out_sg->length = sym->auth.data.length + ses->digest_length; + else + out_sg->length = sym->auth.data.length; + + /* output sg entries */ + sg = &cf->sg[2]; + qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg)); + cpu_to_hw_sg(out_sg); + + /* 1st seg */ + qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); + sg->length = mbuf->data_len - sym->auth.data.offset; + sg->offset = sym->auth.data.offset; + + /* Successive segs */ + mbuf = mbuf->next; + while (mbuf) { + cpu_to_hw_sg(sg); + sg++; + qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); + sg->length = mbuf->data_len; + mbuf = mbuf->next; + } + sg->length -= ses->digest_length; + + if (is_encode(ses)) { + cpu_to_hw_sg(sg); + /* set auth output */ + sg++; + qm_sg_entry_set64(sg, sym->auth.digest.phys_addr); + sg->length = ses->digest_length; + } + sg->final = 1; + cpu_to_hw_sg(sg); + + /* input */ + mbuf = sym->m_src; + in_sg = &cf->sg[1]; + in_sg->extension = 1; + in_sg->final = 1; + if (is_encode(ses)) + in_sg->length = ses->iv.length + sym->auth.data.length; + else + in_sg->length = ses->iv.length + sym->auth.data.length + + ses->digest_length; + + /* input sg entries */ + sg++; + qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg)); + cpu_to_hw_sg(in_sg); + + /* 1st seg IV */ + qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr)); + sg->length = ses->iv.length; + cpu_to_hw_sg(sg); + + /* 2nd seg */ + sg++; + qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); + sg->length = mbuf->data_len - sym->auth.data.offset; + sg->offset = sym->auth.data.offset; + + /* Successive segs */ + mbuf = mbuf->next; + while (mbuf) { + cpu_to_hw_sg(sg); + sg++; + qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); + sg->length = mbuf->data_len; + mbuf = mbuf->next; + } + + sg->length -= ses->digest_length; + if (is_decode(ses)) { + cpu_to_hw_sg(sg); + sg++; + memcpy(ctx->digest, sym->auth.digest.data, + ses->digest_length); + qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest)); + sg->length = ses->digest_length; + } + sg->final = 1; + cpu_to_hw_sg(sg); + + return cf; +} + static inline struct dpaa_sec_job * build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses) { @@ -949,95 +1391,126 @@ build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses) return cf; } -static int -dpaa_sec_enqueue_op(struct rte_crypto_op *op, struct dpaa_sec_qp *qp) -{ - struct dpaa_sec_job *cf; - dpaa_sec_session *ses; - 
struct qm_fd fd; - int ret; - uint32_t auth_only_len = op->sym->auth.data.length - - op->sym->cipher.data.length; - - if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) - ses = (dpaa_sec_session *)get_session_private_data( - op->sym->session, cryptodev_driver_id); - else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) - ses = (dpaa_sec_session *)get_sec_session_private_data( - op->sym->sec_session); - else - return -ENOTSUP; - - if (unlikely(!ses->qp || ses->qp != qp)) { - PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", ses->qp, qp); - if (dpaa_sec_attach_sess_q(qp, ses)) - return -1; - } - - /* - * Segmented buffer is not supported. - */ - if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) { - op->status = RTE_CRYPTO_OP_STATUS_ERROR; - return -ENOTSUP; - } - if (is_auth_only(ses)) { - cf = build_auth_only(op, ses); - } else if (is_cipher_only(ses)) { - cf = build_cipher_only(op, ses); - } else if (is_aead(ses)) { - cf = build_cipher_auth_gcm(op, ses); - auth_only_len = ses->auth_only_len; - } else if (is_auth_cipher(ses)) { - cf = build_cipher_auth(op, ses); - } else if (is_proto_ipsec(ses)) { - cf = build_proto(op, ses); - } else { - PMD_TX_LOG(ERR, "not supported sec op"); - return -ENOTSUP; - } - if (unlikely(!cf)) - return -ENOMEM; - - memset(&fd, 0, sizeof(struct qm_fd)); - qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg)); - fd._format1 = qm_fd_compound; - fd.length29 = 2 * sizeof(struct qm_sg_entry); - /* Auth_only_len is set as 0 in descriptor and it is overwritten - * here in the fd.cmd which will update the DPOVRD reg. - */ - if (auth_only_len) - fd.cmd = 0x80000000 | auth_only_len; - do { - ret = qman_enqueue(ses->inq, &fd, 0); - } while (ret != 0); - - return 0; -} - static uint16_t dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops) { /* Function to transmit the frames to given device and queuepair */ uint32_t loop; - int32_t ret; struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp; uint16_t num_tx = 0; + struct qm_fd fds[DPAA_SEC_BURST], *fd; + uint32_t frames_to_send; + struct rte_crypto_op *op; + struct dpaa_sec_job *cf; + dpaa_sec_session *ses; + struct dpaa_sec_op_ctx *ctx; + uint32_t auth_only_len; + struct qman_fq *inq[DPAA_SEC_BURST]; + + while (nb_ops) { + frames_to_send = (nb_ops > DPAA_SEC_BURST) ? 
+ DPAA_SEC_BURST : nb_ops; + for (loop = 0; loop < frames_to_send; loop++) { + op = *(ops++); + switch (op->sess_type) { + case RTE_CRYPTO_OP_WITH_SESSION: + ses = (dpaa_sec_session *) + get_session_private_data( + op->sym->session, + cryptodev_driver_id); + break; + case RTE_CRYPTO_OP_SECURITY_SESSION: + ses = (dpaa_sec_session *) + get_sec_session_private_data( + op->sym->sec_session); + break; + default: + PMD_TX_LOG(ERR, + "sessionless crypto op not supported"); + frames_to_send = loop; + nb_ops = loop; + goto send_pkts; + } + if (unlikely(!ses->qp || ses->qp != qp)) { + PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", + ses->qp, qp); + if (dpaa_sec_attach_sess_q(qp, ses)) { + frames_to_send = loop; + nb_ops = loop; + goto send_pkts; + } + } - if (unlikely(nb_ops == 0)) - return 0; + auth_only_len = op->sym->auth.data.length - + op->sym->cipher.data.length; + if (rte_pktmbuf_is_contiguous(op->sym->m_src)) { + if (is_auth_only(ses)) { + cf = build_auth_only(op, ses); + } else if (is_cipher_only(ses)) { + cf = build_cipher_only(op, ses); + } else if (is_aead(ses)) { + cf = build_cipher_auth_gcm(op, ses); + auth_only_len = ses->auth_only_len; + } else if (is_auth_cipher(ses)) { + cf = build_cipher_auth(op, ses); + } else if (is_proto_ipsec(ses)) { + cf = build_proto(op, ses); + } else { + PMD_TX_LOG(ERR, "not supported sec op"); + frames_to_send = loop; + nb_ops = loop; + goto send_pkts; + } + } else { + if (is_auth_only(ses)) { + cf = build_auth_only_sg(op, ses); + } else if (is_cipher_only(ses)) { + cf = build_cipher_only_sg(op, ses); + } else if (is_aead(ses)) { + cf = build_cipher_auth_gcm_sg(op, ses); + auth_only_len = ses->auth_only_len; + } else if (is_auth_cipher(ses)) { + cf = build_cipher_auth_sg(op, ses); + } else { + PMD_TX_LOG(ERR, "not supported sec op"); + frames_to_send = loop; + nb_ops = loop; + goto send_pkts; + } + } + if (unlikely(!cf)) { + frames_to_send = loop; + nb_ops = loop; + goto send_pkts; + } + + fd = &fds[loop]; + inq[loop] = ses->inq; + fd->opaque_addr = 0; + fd->cmd = 0; + ctx = container_of(cf, struct dpaa_sec_op_ctx, job); + qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg)); + fd->_format1 = qm_fd_compound; + fd->length29 = 2 * sizeof(struct qm_sg_entry); + /* Auth_only_len is set as 0 in descriptor and it is + * overwritten here in the fd.cmd which will update + * the DPOVRD reg. 
+ */ + if (auth_only_len) + fd->cmd = 0x80000000 | auth_only_len; - /*Prepare each packet which is to be sent*/ - for (loop = 0; loop < nb_ops; loop++) { - if (ops[loop]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { - PMD_TX_LOG(ERR, "sessionless crypto op not supported"); - return 0; } - ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp); - if (!ret) - num_tx++; +send_pkts: + loop = 0; + while (loop < frames_to_send) { + loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop], + frames_to_send - loop); + } + nb_ops -= frames_to_send; + num_tx += frames_to_send; } + dpaa_qp->tx_pkts += num_tx; dpaa_qp->tx_errs += nb_ops - num_tx; @@ -1248,6 +1721,8 @@ dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq) for (i = 0; i < qi->max_nb_sessions; i++) { if (&qi->inq[i] == fq) { + qman_retire_fq(fq, NULL); + qman_oos_fq(fq); qi->inq_attach[i] = 0; return 0; } @@ -1789,7 +2264,8 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | RTE_CRYPTODEV_FF_HW_ACCELERATED | RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | - RTE_CRYPTODEV_FF_SECURITY; + RTE_CRYPTODEV_FF_SECURITY | + RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER; internals = cryptodev->data->dev_private; internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
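
Note on the build_*_sg helpers added above: they all share one shape. sg[0] and sg[1] of the compound frame describe the output and input scatter-gather tables, each mbuf segment becomes one table entry, the first entry honours the per-op data offset, and the last entry carries the final bit before each entry is byte-swapped with cpu_to_hw_sg(). A condensed sketch of that walk, using a simplified software stand-in for the hardware entry (struct sw_sg and fill_sg_from_mbuf() are illustrative, not the QMan qm_sg_entry layout):

    #include <stdint.h>
    #include <rte_mbuf.h>

    /* Illustrative software view of an SG entry, not struct qm_sg_entry. */
    struct sw_sg {
    	uint64_t addr;
    	uint32_t length;
    	uint32_t offset;
    	uint8_t final;
    };

    static int
    fill_sg_from_mbuf(struct sw_sg *sg, struct rte_mbuf *mbuf,
    		  uint32_t data_offset)
    {
    	int n = 0;

    	/* 1st seg honours the operation's data offset */
    	sg[n].addr = rte_pktmbuf_mtophys(mbuf);
    	sg[n].offset = data_offset;
    	sg[n].length = mbuf->data_len - data_offset;

    	/* successive segs are taken whole */
    	for (mbuf = mbuf->next; mbuf != NULL; mbuf = mbuf->next) {
    		n++;
    		sg[n].addr = rte_pktmbuf_mtophys(mbuf);
    		sg[n].offset = 0;
    		sg[n].length = mbuf->data_len;
    	}
    	sg[n].final = 1;	/* terminate the table */
    	return n + 1;
    }

This is also why each helper bounds mbuf->nb_segs plus a few extra entries (IV, AAD, digest) against MAX_SG_ENTRIES before allocating a context.
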
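Note on the enqueue rework: dpaa_sec_enqueue_burst() now prepares up to DPAA_SEC_BURST frame descriptors and pushes them with qman_enqueue_multi_fq(), which may accept fewer frames than requested, so the send loop resubmits the tail until the burst is drained. A minimal sketch of that partial-enqueue pattern, with a hypothetical enqueue_multi() standing in for the QMan call:

    /* Hypothetical driver hook: returns how many of 'count' frames
     * were accepted by the hardware portal (0 <= ret <= count).
     */
    extern int enqueue_multi(void **queues, void **frames, int count);

    static void
    send_burst(void **queues, void **frames, int to_send)
    {
    	int done = 0;

    	/* resubmit the remainder until every frame is queued */
    	while (done < to_send)
    		done += enqueue_multi(&queues[done], &frames[done],
    				      to_send - done);
    }

On the error paths inside the preparation loop the driver shrinks frames_to_send and nb_ops to the already-prepared count and jumps to send_pkts, so frames that were built are still enqueued.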