Added a symmetric crypto PMD for Marvell NITROX V security processor.
See the :doc:`../cryptodevs/nitrox` guide for more details on this new
+* **Added asymmetric support to Marvell OCTEON TX crypto PMD.**
+
+ Added support for asymmetric operations in Marvell OCTEON TX crypto PMD.
+ Supports RSA and modexp operations.
+
* **Updated NXP crypto PMDs for PDCP support.**
PDCP support is added to DPAA_SEC and DPAA2_SEC PMDs using rte_security APIs.
uint64_t ei2;
uint64_t ei3;
} ist;
+ uint8_t *rptr; /**< Result data pointer, filled by asymmetric requests */
/** Control path fields */
uint64_t time_out;
#define CPT_MAJOR_OP_KASUMI 0x38
#define CPT_MAJOR_OP_MISC 0x01
+/* AE opcodes */
+#define CPT_MAJOR_OP_MODEX 0x03
+#define CPT_MINOR_OP_MODEX 0x01
+#define CPT_MINOR_OP_PKCS_ENC 0x02
+#define CPT_MINOR_OP_PKCS_ENC_CRT 0x03
+#define CPT_MINOR_OP_PKCS_DEC 0x04
+#define CPT_MINOR_OP_PKCS_DEC_CRT 0x05
+#define CPT_MINOR_OP_MODEX_CRT 0x06
+
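+/*
+ * PKCS #1 v1.5 padding block types: BT1 (block type 01) for
+ * private-key (signature) padding, BT2 (block type 02) for encryption.
+ */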
+#define CPT_BLOCK_TYPE1 0
+#define CPT_BLOCK_TYPE2 1
+
#define CPT_BYTE_16 16
#define CPT_BYTE_24 24
#define CPT_BYTE_32 32
} fc_params_t;
+/*
+ * Parameters for asymmetric operations
+ */
+struct asym_op_params {
+ struct cpt_request_info *req;
+ phys_addr_t meta_buf;
+};
+
/*
* Parameters for digest
* generate requests
#include <rte_crypto_asym.h>
#include <rte_malloc.h>
+#include "cpt_common.h"
+#include "cpt_hw_types.h"
#include "cpt_mcode_defines.h"
static __rte_always_inline void
}
}
+static __rte_always_inline void
+cpt_fill_req_comp_addr(struct cpt_request_info *req, buf_ptr_t addr)
+{
+ void *completion_addr = RTE_PTR_ALIGN(addr.vaddr, 16);
+
+ /* Pointer to cpt_res_s, updated by CPT */
+ req->completion_addr = (volatile uint64_t *)completion_addr;
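+ /* Carry the same alignment offset over to the DMA address */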
+ req->comp_baddr = addr.dma_addr +
+ RTE_PTR_DIFF(completion_addr, addr.vaddr);
+ *(req->completion_addr) = COMPLETION_CODE_INIT;
+}
+
+static __rte_always_inline int
+cpt_modex_prep(struct asym_op_params *modex_params,
+ struct rte_crypto_modex_xform *mod)
+{
+ struct cpt_request_info *req = modex_params->req;
+ phys_addr_t mphys = modex_params->meta_buf;
+ uint32_t exp_len = mod->exponent.length;
+ uint32_t mod_len = mod->modulus.length;
+ struct rte_crypto_mod_op_param mod_op;
+ struct rte_crypto_op **op;
+ vq_cmd_word0_t vq_cmd_w0;
+ uint64_t total_key_len;
+ opcode_info_t opcode;
+ uint32_t dlen, rlen;
+ uint32_t base_len;
+ buf_ptr_t caddr;
+ uint8_t *dptr;
+
+ /* Extracting modex op from params->req->op[1]->asym->modex */
+ op = RTE_PTR_ADD(req->op, sizeof(uintptr_t));
+ mod_op = ((struct rte_crypto_op *)*op)->asym->modex;
+
+ base_len = mod_op.base.length;
+ if (unlikely(base_len > mod_len)) {
+ CPT_LOG_DP_ERR("Base length greater than modulus length is not supported");
+ (*op)->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -ENOTSUP;
+ }
+
+ total_key_len = mod_len + exp_len;
+
+ /* Input buffer */
+ dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
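+ /*
+ * The session is expected to hold modulus and exponent contiguously,
+ * so one copy of total_key_len covers both. Layout seen by the
+ * microcode: modulus | exponent | base.
+ */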
+ memcpy(dptr, mod->modulus.data, total_key_len);
+ dptr += total_key_len;
+ memcpy(dptr, mod_op.base.data, base_len);
+ dptr += base_len;
+ dlen = total_key_len + base_len;
+
+ /* Result buffer */
+ rlen = mod_len;
+
+ /* Setup opcodes */
+ opcode.s.major = CPT_MAJOR_OP_MODEX;
+ opcode.s.minor = CPT_MINOR_OP_MODEX;
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ /* GP op header */
+ vq_cmd_w0.s.param1 = mod_len;
+ vq_cmd_w0.s.param2 = exp_len;
+ vq_cmd_w0.s.dlen = dlen;
+
+ /* Filling cpt_request_info structure */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei1 = mphys;
+ req->ist.ei2 = mphys + dlen;
+
+ /* Result pointer to store result data */
+ req->rptr = dptr;
+
+ /* alternate_caddr is where the microcode writes its completion status */
+ req->alternate_caddr = (uint64_t *)(dptr + rlen);
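+ /* Prime with the complement so a write by microcode is detectable */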
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+
+ /* Preparing completion addr, +1 for completion code */
+ caddr.vaddr = dptr + rlen + 1;
+ caddr.dma_addr = mphys + dlen + rlen + 1;
+
+ cpt_fill_req_comp_addr(req, caddr);
+ return 0;
+}
+
+static __rte_always_inline void
+cpt_rsa_prep(struct asym_op_params *rsa_params,
+ struct rte_crypto_rsa_xform *rsa,
+ rte_crypto_param *crypto_param)
+{
+ struct cpt_request_info *req = rsa_params->req;
+ phys_addr_t mphys = rsa_params->meta_buf;
+ struct rte_crypto_rsa_op_param rsa_op;
+ uint32_t mod_len = rsa->n.length;
+ uint32_t exp_len = rsa->e.length;
+ struct rte_crypto_op **op;
+ vq_cmd_word0_t vq_cmd_w0;
+ uint64_t total_key_len;
+ opcode_info_t opcode;
+ uint32_t dlen, rlen;
+ uint32_t in_size;
+ buf_ptr_t caddr;
+ uint8_t *dptr;
+
+ /* Extracting rsa op from params->req->op[1]->asym->rsa */
+ op = RTE_PTR_ADD(req->op, sizeof(uintptr_t));
+ rsa_op = ((struct rte_crypto_op *)*op)->asym->rsa;
+ total_key_len = mod_len + exp_len;
+
+ /* Input buffer */
+ dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
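+ /* n and e are expected to sit contiguously in session memory */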
+ memcpy(dptr, rsa->n.data, total_key_len);
+ dptr += total_key_len;
+
+ in_size = crypto_param->length;
+ memcpy(dptr, crypto_param->data, in_size);
+
+ dptr += in_size;
+ dlen = total_key_len + in_size;
+
+ /* Result buffer */
+ rlen = mod_len;
+
+ if (rsa_op.pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+ /* Use mod_exp operation for no_padding type */
+ opcode.s.minor = CPT_MINOR_OP_MODEX;
+ vq_cmd_w0.s.param2 = exp_len;
+ } else {
+ if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+ opcode.s.minor = CPT_MINOR_OP_PKCS_ENC;
+ /* Public key encrypt, use BT2 */
+ vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE2 |
+ ((uint16_t)(exp_len) << 1);
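+ /* param2 packs the block type in bit 0, exponent length above it */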
+ } else if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {
+ opcode.s.minor = CPT_MINOR_OP_PKCS_DEC;
+ /* Public key decrypt, use BT1 */
+ vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE1;
+ /* + 2 for decrypted len */
+ rlen += 2;
+ }
+ }
+
+ /* Setup opcodes */
+ opcode.s.major = CPT_MAJOR_OP_MODEX;
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ /* GP op header */
+ vq_cmd_w0.s.param1 = mod_len;
+ vq_cmd_w0.s.dlen = dlen;
+
+ /* Filling cpt_request_info structure */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei1 = mphys;
+ req->ist.ei2 = mphys + dlen;
+
+ /* Result pointer to store result data */
+ req->rptr = dptr;
+
+ /* alternate_caddr is where the microcode writes its completion status */
+ req->alternate_caddr = (uint64_t *)(dptr + rlen);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+
+ /* Preparing completion addr, +1 for completion code */
+ caddr.vaddr = dptr + rlen + 1;
+ caddr.dma_addr = mphys + dlen + rlen + 1;
+
+ cpt_fill_req_comp_addr(req, caddr);
+}
+
+static __rte_always_inline void
+cpt_rsa_crt_prep(struct asym_op_params *rsa_params,
+ struct rte_crypto_rsa_xform *rsa,
+ rte_crypto_param *crypto_param)
+{
+ struct cpt_request_info *req = rsa_params->req;
+ phys_addr_t mphys = rsa_params->meta_buf;
+ uint32_t qInv_len = rsa->qt.qInv.length;
+ struct rte_crypto_rsa_op_param rsa_op;
+ uint32_t dP_len = rsa->qt.dP.length;
+ uint32_t dQ_len = rsa->qt.dQ.length;
+ uint32_t p_len = rsa->qt.p.length;
+ uint32_t q_len = rsa->qt.q.length;
+ uint32_t mod_len = rsa->n.length;
+ struct rte_crypto_op **op;
+ vq_cmd_word0_t vq_cmd_w0;
+ uint64_t total_key_len;
+ opcode_info_t opcode;
+ uint32_t dlen, rlen;
+ uint32_t in_size;
+ buf_ptr_t caddr;
+ uint8_t *dptr;
+
+ /* Extracting rsa op from params->req->op[1]->asym->rsa */
+ op = RTE_PTR_ADD(req->op, sizeof(uintptr_t));
+ rsa_op = ((struct rte_crypto_op *)*op)->asym->rsa;
+ total_key_len = p_len + q_len + dP_len + dQ_len + qInv_len;
+
+ /* Input buffer */
+ dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
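+ /*
+ * The five CRT key components are expected to be stored contiguously
+ * starting at q.data, hence a single copy of total_key_len.
+ */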
+ memcpy(dptr, rsa->qt.q.data, total_key_len);
+ dptr += total_key_len;
+
+ in_size = crypto_param->length;
+ memcpy(dptr, crypto_param->data, in_size);
+
+ dptr += in_size;
+ dlen = total_key_len + in_size;
+
+ /* Result buffer */
+ rlen = mod_len;
+
+ if (rsa_op.pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+ /* Use mod_exp operation for no_padding type */
+ opcode.s.minor = CPT_MINOR_OP_MODEX_CRT;
+ } else {
+ if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
+ opcode.s.minor = CPT_MINOR_OP_PKCS_ENC_CRT;
+ /* Private encrypt, use BT1 */
+ vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE1;
+ } else if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) {
+ opcode.s.minor = CPT_MINOR_OP_PKCS_DEC_CRT;
+ /* Private decrypt, use BT2 */
+ vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE2;
+ /* + 2 for decrypted len */
+ rlen += 2;
+ }
+ }
+
+ /* Setup opcodes */
+ opcode.s.major = CPT_MAJOR_OP_MODEX;
+ vq_cmd_w0.s.opcode = opcode.flags;
+
+ /* GP op header */
+ vq_cmd_w0.s.param1 = mod_len;
+ vq_cmd_w0.s.dlen = dlen;
+
+ /* Filling cpt_request_info structure */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei1 = mphys;
+ req->ist.ei2 = mphys + dlen;
+
+ /* Result pointer to store result data */
+ req->rptr = dptr;
+
+ /* alternate_caddr is where the microcode writes its completion status */
+ req->alternate_caddr = (uint64_t *)(dptr + rlen);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+
+ /* Preparing completion addr, +1 for completion code */
+ caddr.vaddr = dptr + rlen + 1;
+ caddr.dma_addr = mphys + dlen + rlen + 1;
+
+ cpt_fill_req_comp_addr(req, caddr);
+}
+
+static __rte_always_inline int __hot
+cpt_enqueue_rsa_op(struct rte_crypto_op *op,
+ struct asym_op_params *params,
+ struct cpt_asym_sess_misc *sess)
+{
+ struct rte_crypto_rsa_op_param *rsa = &op->asym->rsa;
+
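+ /* Public-key ops take the plain path, private-key ops the CRT path */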
+ switch (rsa->op_type) {
+ case RTE_CRYPTO_ASYM_OP_VERIFY:
+ cpt_rsa_prep(params, &sess->rsa_ctx, &rsa->sign);
+ break;
+ case RTE_CRYPTO_ASYM_OP_ENCRYPT:
+ cpt_rsa_prep(params, &sess->rsa_ctx, &rsa->message);
+ break;
+ case RTE_CRYPTO_ASYM_OP_SIGN:
+ cpt_rsa_crt_prep(params, &sess->rsa_ctx, &rsa->message);
+ break;
+ case RTE_CRYPTO_ASYM_OP_DECRYPT:
+ cpt_rsa_crt_prep(params, &sess->rsa_ctx, &rsa->cipher);
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ return 0;
+}
#endif /* _CPT_UCODE_ASYM_H_ */
return 0;
}
+static __rte_always_inline int __hot
+otx_cpt_enq_single_asym(struct cpt_instance *instance,
+ struct rte_crypto_op *op,
+ struct pending_queue *pqueue)
+{
+ struct cpt_qp_meta_info *minfo = &instance->meta_info;
+ struct rte_crypto_asym_op *asym_op = op->asym;
+ struct asym_op_params params = {0};
+ struct cpt_asym_sess_misc *sess;
+ uintptr_t *cop;
+ void *mdata;
+ int ret;
+
+ if (unlikely(rte_mempool_get(minfo->pool, &mdata) < 0)) {
+ CPT_LOG_DP_ERR("Could not allocate meta buffer for request");
+ return -ENOMEM;
+ }
+
+ sess = get_asym_session_private_data(asym_op->session,
+ otx_cryptodev_driver_id);
+
+ /* Store phys_addr of the mdata to meta_buf */
+ params.meta_buf = rte_mempool_virt2iova(mdata);
+
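+ /*
+ * Meta buffer layout: four uintptr_t words (mdata, op, two unused),
+ * then cpt_request_info, then the DMA-able input/result area.
+ */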
+ cop = mdata;
+ cop[0] = (uintptr_t)mdata;
+ cop[1] = (uintptr_t)op;
+ cop[2] = cop[3] = 0ULL;
+
+ params.req = RTE_PTR_ADD(cop, 4 * sizeof(uintptr_t));
+ params.req->op = cop;
+
+ /* Adjust meta_buf by crypto_op data and request_info struct */
+ params.meta_buf += (4 * sizeof(uintptr_t)) +
+ sizeof(struct cpt_request_info);
+
+ switch (sess->xfrm_type) {
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ ret = cpt_modex_prep(&params, &sess->mod_ctx);
+ if (unlikely(ret))
+ goto req_fail;
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ ret = cpt_enqueue_rsa_op(op, &params, sess);
+ if (unlikely(ret))
+ goto req_fail;
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ret = -EINVAL;
+ goto req_fail;
+ }
+
+ ret = otx_cpt_request_enqueue(instance, pqueue, params.req);
+
+ if (unlikely(ret)) {
+ CPT_LOG_DP_ERR("Could not enqueue crypto req");
+ goto req_fail;
+ }
+
+ return 0;
+
+req_fail:
+ free_op_meta(mdata, minfo->pool);
+
+ return ret;
+}
+
static __rte_always_inline int __hot
otx_cpt_enq_single_sym(struct cpt_instance *instance,
struct rte_crypto_op *op,
return ret;
}
+#define OP_TYPE_SYM 0
+#define OP_TYPE_ASYM 1
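+/*
+ * op_type is a compile-time constant supplied by the thin sym/asym
+ * wrappers below; with the always-inline helpers the unused branch
+ * is expected to be optimized away.
+ */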
+
static __rte_always_inline int __hot
otx_cpt_enq_single(struct cpt_instance *inst,
struct rte_crypto_op *op,
- struct pending_queue *pqueue)
+ struct pending_queue *pqueue,
+ const uint8_t op_type)
{
/* Check for the type */
- if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
- return otx_cpt_enq_single_sym(inst, op, pqueue);
- else if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS))
- return otx_cpt_enq_single_sym_sessless(inst, op, pqueue);
+ if (op_type == OP_TYPE_SYM) {
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+ return otx_cpt_enq_single_sym(inst, op, pqueue);
+ else
+ return otx_cpt_enq_single_sym_sessless(inst, op,
+ pqueue);
+ }
+
+ if (op_type == OP_TYPE_ASYM) {
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+ return otx_cpt_enq_single_asym(inst, op, pqueue);
+ }
/* Should not reach here */
- return -EINVAL;
+ return -ENOTSUP;
}
-static uint16_t
-otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+static __rte_always_inline uint16_t __hot
+otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
+ const uint8_t op_type)
{
struct cpt_instance *instance = (struct cpt_instance *)qptr;
uint16_t count;
while (likely(count < nb_ops)) {
/* Enqueue single op */
- ret = otx_cpt_enq_single(instance, ops[count], pqueue);
+ ret = otx_cpt_enq_single(instance, ops[count], pqueue, op_type);
if (unlikely(ret))
break;
return count;
}
-static __rte_always_inline void
-otx_cpt_dequeue_post_process(struct rte_crypto_op *cop, uintptr_t *rsp)
+static uint16_t
+otx_cpt_enqueue_asym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ return otx_cpt_pkt_enqueue(qptr, ops, nb_ops, OP_TYPE_ASYM);
+}
+
+static uint16_t
+otx_cpt_enqueue_sym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ return otx_cpt_pkt_enqueue(qptr, ops, nb_ops, OP_TYPE_SYM);
+}
+
+static inline void
+otx_cpt_asym_rsa_op(struct rte_crypto_op *cop, struct cpt_request_info *req,
+ struct rte_crypto_rsa_xform *rsa_ctx)
+
+{
+ struct rte_crypto_rsa_op_param *rsa = &cop->asym->rsa;
+
+ switch (rsa->op_type) {
+ case RTE_CRYPTO_ASYM_OP_ENCRYPT:
+ rsa->cipher.length = rsa_ctx->n.length;
+ memcpy(rsa->cipher.data, req->rptr, rsa->cipher.length);
+ break;
+ case RTE_CRYPTO_ASYM_OP_DECRYPT:
+ if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE)
+ rsa->message.length = rsa_ctx->n.length;
+ else {
+ /* Get length of decrypted output */
+ rsa->message.length = rte_cpu_to_be_16
+ (*((uint16_t *)req->rptr));
+
+ /* Offset data pointer by length fields */
+ req->rptr += 2;
+ }
+ memcpy(rsa->message.data, req->rptr, rsa->message.length);
+ break;
+ case RTE_CRYPTO_ASYM_OP_SIGN:
+ rsa->sign.length = rsa_ctx->n.length;
+ memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
+ break;
+ case RTE_CRYPTO_ASYM_OP_VERIFY:
+ if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE)
+ rsa->sign.length = rsa_ctx->n.length;
+ else {
+ /* Get length of decrypted output */
+ rsa->sign.length = rte_cpu_to_be_16
+ (*((uint16_t *)req->rptr));
+
+ /* Offset data pointer by length fields */
+ req->rptr += 2;
+ }
+ memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
+
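+ /* Compare the recovered data against the supplied message */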
+ if (memcmp(rsa->sign.data, rsa->message.data,
+ rsa->message.length)) {
+ CPT_LOG_DP_ERR("RSA verification failed");
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+ break;
+ default:
+ CPT_LOG_DP_DEBUG("Invalid RSA operation type");
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+}
+
+static __rte_always_inline void __hot
+otx_cpt_asym_post_process(struct rte_crypto_op *cop,
+ struct cpt_request_info *req)
+{
+ struct rte_crypto_asym_op *op = cop->asym;
+ struct cpt_asym_sess_misc *sess;
+
+ sess = get_asym_session_private_data(op->session,
+ otx_cryptodev_driver_id);
+
+ switch (sess->xfrm_type) {
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ otx_cpt_asym_rsa_op(cop, req, &sess->rsa_ctx);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ op->modex.result.length = sess->mod_ctx.modulus.length;
+ memcpy(op->modex.result.data, req->rptr,
+ op->modex.result.length);
+ break;
+ default:
+ CPT_LOG_DP_DEBUG("Invalid crypto xform type");
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+}
+
+static __rte_always_inline void __hot
+otx_cpt_dequeue_post_process(struct rte_crypto_op *cop, uintptr_t *rsp,
+ const uint8_t op_type)
{
/* H/w has returned success */
cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Perform further post processing */
- if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ if ((op_type == OP_TYPE_SYM) &&
+ (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
/* Check if auth verify need to be completed */
if (unlikely(rsp[2]))
compl_auth_verify(cop, (uint8_t *)rsp[2], rsp[3]);
return;
}
+
+ if ((op_type == OP_TYPE_ASYM) &&
+ (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC)) {
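+ /* Skip the four meta words stored ahead of cpt_request_info */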
+ rsp = RTE_PTR_ADD(rsp, 4 * sizeof(uintptr_t));
+ otx_cpt_asym_post_process(cop, (struct cpt_request_info *)rsp);
+ }
}
-static uint16_t
-otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+static __rte_always_inline uint16_t __hot
+otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
+ const uint8_t op_type)
{
struct cpt_instance *instance = (struct cpt_instance *)qptr;
struct cpt_request_info *user_req;
if (likely(cc[i] == 0)) {
/* H/w success pkt. Post process */
- otx_cpt_dequeue_post_process(cop, rsp);
+ otx_cpt_dequeue_post_process(cop, rsp, op_type);
} else if (cc[i] == ERR_GC_ICV_MISCOMPARE) {
/* auth data mismatch */
cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
return nb_completed;
}
+static uint16_t
+otx_cpt_dequeue_asym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ return otx_cpt_pkt_dequeue(qptr, ops, nb_ops, OP_TYPE_ASYM);
+}
+
+static uint16_t
+otx_cpt_dequeue_sym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ return otx_cpt_pkt_dequeue(qptr, ops, nb_ops, OP_TYPE_SYM);
+}
+
static struct rte_cryptodev_ops cptvf_ops = {
/* Device related operations */
.dev_configure = otx_cpt_dev_config,
c_dev->dev_ops = &cptvf_ops;
- c_dev->enqueue_burst = otx_cpt_pkt_enqueue;
- c_dev->dequeue_burst = otx_cpt_pkt_dequeue;
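+ /* A device instance services either symmetric or asymmetric ops */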
+ if (c_dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
+ c_dev->enqueue_burst = otx_cpt_enqueue_sym;
+ c_dev->dequeue_burst = otx_cpt_dequeue_sym;
+ } else {
+ c_dev->enqueue_burst = otx_cpt_enqueue_asym;
+ c_dev->dequeue_burst = otx_cpt_dequeue_asym;
+ }
/* Save dev private data */
c_dev->data->dev_private = cptvf;