From: Nagadheeraj Rottela
Date: Tue, 1 Oct 2019 06:41:31 +0000 (+0000)
Subject: crypto/nitrox: add burst enqueue and dequeue ops
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=1acffa392934a858d20338f8d87f03df461361aa;p=dpdk.git

crypto/nitrox: add burst enqueue and dequeue ops

Add burst enqueue and dequeue operations along with interface for
symmetric request manager.

Signed-off-by: Nagadheeraj Rottela
Acked-by: Akhil Goyal
---

diff --git a/drivers/crypto/nitrox/nitrox_qp.h b/drivers/crypto/nitrox/nitrox_qp.h
index 0244c4dbf4..d42d53f92b 100644
--- a/drivers/crypto/nitrox/nitrox_qp.h
+++ b/drivers/crypto/nitrox/nitrox_qp.h
@@ -34,12 +34,68 @@ struct nitrox_qp {
 	rte_atomic16_t pending_count;
 };
 
+static inline uint16_t
+nitrox_qp_free_count(struct nitrox_qp *qp)
+{
+	uint16_t pending_count = rte_atomic16_read(&qp->pending_count);
+
+	RTE_ASSERT(qp->count >= pending_count);
+	return (qp->count - pending_count);
+}
+
 static inline bool
 nitrox_qp_is_empty(struct nitrox_qp *qp)
 {
 	return (rte_atomic16_read(&qp->pending_count) == 0);
 }
 
+static inline uint16_t
+nitrox_qp_used_count(struct nitrox_qp *qp)
+{
+	return rte_atomic16_read(&qp->pending_count);
+}
+
+static inline struct nitrox_softreq *
+nitrox_qp_get_softreq(struct nitrox_qp *qp)
+{
+	uint32_t tail = qp->tail % qp->count;
+
+	rte_smp_rmb();
+	return qp->ridq[tail].sr;
+}
+
+static inline void
+nitrox_ring_dbell(struct nitrox_qp *qp, uint16_t cnt)
+{
+	struct command_queue *cmdq = &qp->cmdq;
+
+	if (!cnt)
+		return;
+
+	rte_io_wmb();
+	rte_write64(cnt, cmdq->dbell_csr_addr);
+}
+
+static inline void
+nitrox_qp_enqueue(struct nitrox_qp *qp, void *instr, struct nitrox_softreq *sr)
+{
+	uint32_t head = qp->head % qp->count;
+
+	qp->head++;
+	memcpy(&qp->cmdq.ring[head * qp->cmdq.instr_size],
+	       instr, qp->cmdq.instr_size);
+	qp->ridq[head].sr = sr;
+	rte_smp_wmb();
+	rte_atomic16_inc(&qp->pending_count);
+}
+
+static inline void
+nitrox_qp_dequeue(struct nitrox_qp *qp)
+{
+	qp->tail++;
+	rte_atomic16_dec(&qp->pending_count);
+}
+
 int nitrox_qp_setup(struct nitrox_qp *qp, uint8_t *bar_addr,
 		    const char *dev_name, uint32_t nb_descriptors,
 		    uint8_t inst_size, int socket_id);
diff --git a/drivers/crypto/nitrox/nitrox_sym.c b/drivers/crypto/nitrox/nitrox_sym.c
index c7d3b8d49d..56337604ad 100644
--- a/drivers/crypto/nitrox/nitrox_sym.c
+++ b/drivers/crypto/nitrox/nitrox_sym.c
@@ -539,6 +539,123 @@ nitrox_sym_dev_sess_clear(struct rte_cryptodev *cdev,
 	rte_mempool_put(sess_mp, ctx);
 }
 
+static struct nitrox_crypto_ctx *
+get_crypto_ctx(struct rte_crypto_op *op)
+{
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		if (likely(op->sym->session))
+			return get_sym_session_private_data(op->sym->session,
+							    nitrox_sym_drv_id);
+	}
+
+	return NULL;
+}
+
+static int
+nitrox_enq_single_op(struct nitrox_qp *qp, struct rte_crypto_op *op)
+{
+	struct nitrox_crypto_ctx *ctx;
+	struct nitrox_softreq *sr;
+	int err;
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	ctx = get_crypto_ctx(op);
+	if (unlikely(!ctx)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+		return -EINVAL;
+	}
+
+	if (unlikely(rte_mempool_get(qp->sr_mp, (void **)&sr)))
+		return -ENOMEM;
+
+	err = nitrox_process_se_req(qp->qno, op, ctx, sr);
+	if (unlikely(err)) {
+		rte_mempool_put(qp->sr_mp, sr);
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return err;
+	}
+
+	nitrox_qp_enqueue(qp, nitrox_sym_instr_addr(sr), sr);
+	return 0;
+}
+
+static uint16_t
+nitrox_sym_dev_enq_burst(void *queue_pair, struct rte_crypto_op **ops,
+			 uint16_t nb_ops)
+{
+	struct nitrox_qp *qp = queue_pair;
+	uint16_t free_slots = 0;
+	uint16_t cnt = 0;
+	bool err = false;
+
+	free_slots = nitrox_qp_free_count(qp);
+	if (nb_ops > free_slots)
+		nb_ops = free_slots;
+
+	for (cnt = 0; cnt < nb_ops; cnt++) {
+		if (unlikely(nitrox_enq_single_op(qp, ops[cnt]))) {
+			err = true;
+			break;
+		}
+	}
+
+	nitrox_ring_dbell(qp, cnt);
+	qp->stats.enqueued_count += cnt;
+	if (unlikely(err))
+		qp->stats.enqueue_err_count++;
+
+	return cnt;
+}
+
+static int
+nitrox_deq_single_op(struct nitrox_qp *qp, struct rte_crypto_op **op_ptr)
+{
+	struct nitrox_softreq *sr;
+	int ret;
+	struct rte_crypto_op *op;
+
+	sr = nitrox_qp_get_softreq(qp);
+	ret = nitrox_check_se_req(sr, op_ptr);
+	if (ret < 0)
+		return -EAGAIN;
+
+	op = *op_ptr;
+	nitrox_qp_dequeue(qp);
+	rte_mempool_put(qp->sr_mp, sr);
+	if (!ret) {
+		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		qp->stats.dequeued_count++;
+
+		return 0;
+	}
+
+	if (ret == MC_MAC_MISMATCH_ERR_CODE)
+		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	else
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+	qp->stats.dequeue_err_count++;
+	return 0;
+}
+
+static uint16_t
+nitrox_sym_dev_deq_burst(void *queue_pair, struct rte_crypto_op **ops,
+			 uint16_t nb_ops)
+{
+	struct nitrox_qp *qp = queue_pair;
+	uint16_t filled_slots = nitrox_qp_used_count(qp);
+	int cnt = 0;
+
+	if (nb_ops > filled_slots)
+		nb_ops = filled_slots;
+
+	for (cnt = 0; cnt < nb_ops; cnt++)
+		if (nitrox_deq_single_op(qp, &ops[cnt]))
+			break;
+
+	return cnt;
+}
+
 static struct rte_cryptodev_ops nitrox_cryptodev_ops = {
 	.dev_configure		= nitrox_sym_dev_config,
 	.dev_start		= nitrox_sym_dev_start,
@@ -580,8 +697,8 @@ nitrox_sym_pmd_create(struct nitrox_device *ndev)
 	ndev->rte_sym_dev.name = cdev->data->name;
 	cdev->driver_id = nitrox_sym_drv_id;
 	cdev->dev_ops = &nitrox_cryptodev_ops;
-	cdev->enqueue_burst = NULL;
-	cdev->dequeue_burst = NULL;
+	cdev->enqueue_burst = nitrox_sym_dev_enq_burst;
+	cdev->dequeue_burst = nitrox_sym_dev_deq_burst;
 	cdev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 		RTE_CRYPTODEV_FF_HW_ACCELERATED |
 		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
diff --git a/drivers/crypto/nitrox/nitrox_sym_reqmgr.c b/drivers/crypto/nitrox/nitrox_sym_reqmgr.c
index 42d67317c7..e472750948 100644
--- a/drivers/crypto/nitrox/nitrox_sym_reqmgr.c
+++ b/drivers/crypto/nitrox/nitrox_sym_reqmgr.c
@@ -4,12 +4,113 @@
 
 #include <rte_crypto.h>
 #include <rte_cryptodev.h>
+#include <rte_cycles.h>
 #include <rte_errno.h>
 
 #include "nitrox_sym_reqmgr.h"
 #include "nitrox_logs.h"
 
+#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL
+#define CMD_TIMEOUT 2
+
+union pkt_instr_hdr {
+	uint64_t value;
+	struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+		uint64_t raz_48_63 : 16;
+		uint64_t g : 1;
+		uint64_t gsz : 7;
+		uint64_t ihi : 1;
+		uint64_t ssz : 7;
+		uint64_t raz_30_31 : 2;
+		uint64_t fsz : 6;
+		uint64_t raz_16_23 : 8;
+		uint64_t tlen : 16;
+#else
+		uint64_t tlen : 16;
+		uint64_t raz_16_23 : 8;
+		uint64_t fsz : 6;
+		uint64_t raz_30_31 : 2;
+		uint64_t ssz : 7;
+		uint64_t ihi : 1;
+		uint64_t gsz : 7;
+		uint64_t g : 1;
+		uint64_t raz_48_63 : 16;
+#endif
+	} s;
+};
+
+union pkt_hdr {
+	uint64_t value[2];
+	struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+		uint64_t opcode : 8;
+		uint64_t arg : 8;
+		uint64_t ctxc : 2;
+		uint64_t unca : 1;
+		uint64_t raz_44 : 1;
+		uint64_t info : 3;
+		uint64_t destport : 9;
+		uint64_t unc : 8;
+		uint64_t raz_19_23 : 5;
+		uint64_t grp : 3;
+		uint64_t raz_15 : 1;
+		uint64_t ctxl : 7;
+		uint64_t uddl : 8;
+#else
+		uint64_t uddl : 8;
+		uint64_t ctxl : 7;
+		uint64_t raz_15 : 1;
+		uint64_t grp : 3;
+		uint64_t raz_19_23 : 5;
+		uint64_t unc : 8;
+		uint64_t destport : 9;
+		uint64_t info : 3;
+		uint64_t raz_44 : 1;
+		uint64_t unca : 1;
+		uint64_t ctxc : 2;
+		uint64_t arg : 8;
+		uint64_t opcode : 8;
+#endif
+		uint64_t ctxp;
+	} s;
+};
+
+union slc_store_info {
+	uint64_t value[2];
+	struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+		uint64_t raz_39_63 : 25;
+		uint64_t ssz : 7;
+		uint64_t raz_0_31 : 32;
+#else
+		uint64_t raz_0_31 : 32;
+		uint64_t ssz : 7;
+		uint64_t raz_39_63 : 25;
+#endif
+		uint64_t rptr;
+	} s;
+};
+
+struct nps_pkt_instr {
+	uint64_t dptr0;
+	union pkt_instr_hdr ih;
+	union pkt_hdr irh;
+	union slc_store_info slc;
+	uint64_t fdata[2];
+};
+
+struct resp_hdr {
+	uint64_t orh;
+	uint64_t completion;
+};
+
 struct nitrox_softreq {
+	struct nitrox_crypto_ctx *ctx;
+	struct rte_crypto_op *op;
+	struct nps_pkt_instr instr;
+	struct resp_hdr resp;
+	uint64_t timeout;
 	rte_iova_t iova;
 };
 
@@ -20,6 +121,78 @@ softreq_init(struct nitrox_softreq *sr, rte_iova_t iova)
 	sr->iova = iova;
 }
 
+static int
+process_cipher_auth_data(struct nitrox_softreq *sr)
+{
+	RTE_SET_USED(sr);
+	return 0;
+}
+
+static int
+process_softreq(struct nitrox_softreq *sr)
+{
+	struct nitrox_crypto_ctx *ctx = sr->ctx;
+	int err = 0;
+
+	switch (ctx->nitrox_chain) {
+	case NITROX_CHAIN_CIPHER_AUTH:
+	case NITROX_CHAIN_AUTH_CIPHER:
+		err = process_cipher_auth_data(sr);
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+int
+nitrox_process_se_req(uint16_t qno, struct rte_crypto_op *op,
+		      struct nitrox_crypto_ctx *ctx,
+		      struct nitrox_softreq *sr)
+{
+	RTE_SET_USED(qno);
+	softreq_init(sr, sr->iova);
+	sr->ctx = ctx;
+	sr->op = op;
+	process_softreq(sr);
+	sr->timeout = rte_get_timer_cycles() + CMD_TIMEOUT * rte_get_timer_hz();
+	return 0;
+}
+
+int
+nitrox_check_se_req(struct nitrox_softreq *sr, struct rte_crypto_op **op)
+{
+	uint64_t cc;
+	uint64_t orh;
+	int err;
+
+	cc = *(volatile uint64_t *)(&sr->resp.completion);
+	orh = *(volatile uint64_t *)(&sr->resp.orh);
+	if (cc != PENDING_SIG)
+		err = 0;
+	else if ((orh != PENDING_SIG) && (orh & 0xff))
+		err = orh & 0xff;
+	else if (rte_get_timer_cycles() >= sr->timeout)
+		err = 0xff;
+	else
+		return -EAGAIN;
+
+	if (unlikely(err))
+		NITROX_LOG(ERR, "Request err 0x%x, orh 0x%"PRIx64"\n", err,
+			   sr->resp.orh);
+
+	*op = sr->op;
+	return err;
+}
+
+void *
+nitrox_sym_instr_addr(struct nitrox_softreq *sr)
+{
+	return &sr->instr;
+}
+
 static void
 req_pool_obj_init(__rte_unused struct rte_mempool *mp,
 		  __rte_unused void *opaque, void *obj,
diff --git a/drivers/crypto/nitrox/nitrox_sym_reqmgr.h b/drivers/crypto/nitrox/nitrox_sym_reqmgr.h
index 5953c958c1..fa2637bdb8 100644
--- a/drivers/crypto/nitrox/nitrox_sym_reqmgr.h
+++ b/drivers/crypto/nitrox/nitrox_sym_reqmgr.h
@@ -5,6 +5,16 @@
 #ifndef _NITROX_SYM_REQMGR_H_
 #define _NITROX_SYM_REQMGR_H_
 
+#include "nitrox_sym_ctx.h"
+
+struct nitrox_qp;
+struct nitrox_softreq;
+
+int nitrox_process_se_req(uint16_t qno, struct rte_crypto_op *op,
+			  struct nitrox_crypto_ctx *ctx,
+			  struct nitrox_softreq *sr);
+int nitrox_check_se_req(struct nitrox_softreq *sr, struct rte_crypto_op **op);
+void *nitrox_sym_instr_addr(struct nitrox_softreq *sr);
+
 struct rte_mempool *nitrox_sym_req_pool_create(struct rte_cryptodev *cdev,
 					       uint32_t nobjs, uint16_t qp_id,
 					       int socket_id);
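
For context: applications reach these handlers through the generic cryptodev
burst API, which dispatches to the function pointers registered in
nitrox_sym_pmd_create(). A minimal sketch of such a poll loop follows; it is
illustrative only and assumes the device and queue pair are already configured
and that ops[] carries crypto ops with valid symmetric sessions attached.

#include <rte_cryptodev.h>

/* Illustrative sketch: dev_id, qp_id and ops[] (with sessions attached)
 * are assumed to be prepared by the application beforehand.
 */
static void
burst_roundtrip(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t enq = 0, deq = 0;

	/* The PMD may accept fewer ops than requested when the command
	 * queue has fewer free slots (nitrox_qp_free_count()).
	 */
	while (enq < nb_ops)
		enq += rte_cryptodev_enqueue_burst(dev_id, qp_id,
						   &ops[enq], nb_ops - enq);

	/* nitrox_check_se_req() returns -EAGAIN for requests still in
	 * flight, so dequeue stops at the first pending slot and is
	 * simply retried until everything submitted has completed.
	 */
	while (deq < enq)
		deq += rte_cryptodev_dequeue_burst(dev_id, qp_id,
						   &ops[deq], enq - deq);
}

Completions are consumed strictly in submission order (the ridq[] tail), and a
request that hits CMD_TIMEOUT is still handed back with
RTE_CRYPTO_OP_STATUS_ERROR rather than being dropped, so a loop like the one
above terminates.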