#include "otx_cryptodev_ops.h"
#include "cpt_pmd_logs.h"
+#include "cpt_pmd_ops_helper.h"
#include "cpt_ucode.h"
#include "cpt_ucode_asym.h"
+static uint64_t otx_fpm_iova[CPT_EC_ID_PMAX];
+
/* Forward declarations */
static int
/* PMD ops */
static int
-otx_cpt_dev_config(struct rte_cryptodev *dev __rte_unused,
+otx_cpt_dev_config(struct rte_cryptodev *dev,
struct rte_cryptodev_config *config __rte_unused)
{
+ int ret = 0;
+
CPT_PMD_INIT_FUNC_TRACE();
- return 0;
+
+ if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
+ /* Initialize shared FPM table */
+ ret = cpt_fpm_init(otx_fpm_iova);
+
+ return ret;
}
static int
CPT_PMD_INIT_FUNC_TRACE();
+ if (c_dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
+ cpt_fpm_clear();
+
otx_cpt_stop_device(cptvf);
}
rte_mempool_put(sess_mp, priv);
}
-static __rte_always_inline int32_t __hot
+static __rte_always_inline int32_t __rte_hot
otx_cpt_request_enqueue(struct cpt_instance *instance,
struct pending_queue *pqueue,
void *req)
return 0;
}
-static __rte_always_inline int __hot
+static __rte_always_inline int __rte_hot
otx_cpt_enq_single_asym(struct cpt_instance *instance,
struct rte_crypto_op *op,
struct pending_queue *pqueue)
if (unlikely(ret))
goto req_fail;
break;
+ case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+		ret = cpt_enqueue_ecdsa_op(op, &params, sess, otx_fpm_iova);
+ if (unlikely(ret))
+ goto req_fail;
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_ECPM:
+		ret = cpt_ecpm_prep(&asym_op->ecpm, &params,
+ sess->ec_ctx.curveid);
+ if (unlikely(ret))
+ goto req_fail;
+ break;
+
default:
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
ret = -EINVAL;
return ret;
}
-static __rte_always_inline int __hot
+static __rte_always_inline int __rte_hot
otx_cpt_enq_single_sym(struct cpt_instance *instance,
struct rte_crypto_op *op,
struct pending_queue *pqueue)
return 0;
}
-static __rte_always_inline int __hot
+static __rte_always_inline int __rte_hot
otx_cpt_enq_single_sym_sessless(struct cpt_instance *instance,
struct rte_crypto_op *op,
struct pending_queue *pqueue)
#define OP_TYPE_SYM 0
#define OP_TYPE_ASYM 1
-static __rte_always_inline int __hot
+static __rte_always_inline int __rte_hot
otx_cpt_enq_single(struct cpt_instance *inst,
struct rte_crypto_op *op,
struct pending_queue *pqueue,
return -ENOTSUP;
}
-static __rte_always_inline uint16_t __hot
+static __rte_always_inline uint16_t __rte_hot
otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
const uint8_t op_type)
{
}
}
-static __rte_always_inline void __hot
+/*
+ * Copy an ECDSA result from the completed CPT request's response
+ * buffer (req->rptr) back into the crypto op. Sign ops receive the
+ * r and s signature components; verify ops produce no output data
+ * here (pass/fail is reported via op status), so they return early.
+ */
+static __rte_always_inline void
+otx_cpt_asym_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa,
+			      struct cpt_request_info *req,
+			      struct cpt_asym_ec_ctx *ec)
+
+{
+	/* Component size is the prime length of the session's curve */
+	int prime_len = ec_grp[ec->curveid].prime.length;
+
+	if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
+		return;
+
+	/* Separate out sign r and s components */
+	memcpy(ecdsa->r.data, req->rptr, prime_len);
+	/* s follows r at the next 8-byte-aligned offset in the response */
+	memcpy(ecdsa->s.data, req->rptr + ROUNDUP8(prime_len), prime_len);
+	ecdsa->r.length = prime_len;
+	ecdsa->s.length = prime_len;
+}
+
+/*
+ * Copy an EC point-multiplication result from the completed CPT
+ * request's response buffer (req->rptr) back into the crypto op:
+ * the x and y coordinates of the resulting point.
+ */
+static __rte_always_inline void
+otx_cpt_asym_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm,
+			     struct cpt_request_info *req,
+			     struct cpt_asym_ec_ctx *ec)
+{
+	/* Coordinate size is the prime length of the session's curve */
+	int prime_len = ec_grp[ec->curveid].prime.length;
+
+	memcpy(ecpm->r.x.data, req->rptr, prime_len);
+	/* y follows x at the next 8-byte-aligned offset in the response */
+	memcpy(ecpm->r.y.data, req->rptr + ROUNDUP8(prime_len), prime_len);
+	ecpm->r.x.length = prime_len;
+	ecpm->r.y.length = prime_len;
+}
+
+static __rte_always_inline void __rte_hot
otx_cpt_asym_post_process(struct rte_crypto_op *cop,
struct cpt_request_info *req)
{
memcpy(op->modex.result.data, req->rptr,
op->modex.result.length);
break;
+ case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+ otx_cpt_asym_dequeue_ecdsa_op(&op->ecdsa, req, &sess->ec_ctx);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_ECPM:
+ otx_cpt_asym_dequeue_ecpm_op(&op->ecpm, req, &sess->ec_ctx);
+ break;
default:
CPT_LOG_DP_DEBUG("Invalid crypto xform type");
cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
}
}
-static __rte_always_inline void __hot
+static __rte_always_inline void __rte_hot
otx_cpt_dequeue_post_process(struct rte_crypto_op *cop, uintptr_t *rsp,
const uint8_t op_type)
{
return;
}
-static __rte_always_inline uint16_t __hot
+static __rte_always_inline uint16_t __rte_hot
otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
const uint8_t op_type)
{
.stats_reset = otx_cpt_stats_reset,
.queue_pair_setup = otx_cpt_que_pair_setup,
.queue_pair_release = otx_cpt_que_pair_release,
- .queue_pair_count = NULL,
/* Crypto related operations */
.sym_session_get_size = otx_cpt_get_session_size,
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_IN_PLACE_SGL |
RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
- RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT;
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
break;
default:
/* Feature not supported. Abort */