#include <rte_bus_pci.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_eventdev.h>
+#include <rte_event_crypto_adapter.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include "cpt_ucode.h"
#include "cpt_ucode_asym.h"
+#include "ssovf_worker.h"
+
static uint64_t otx_fpm_iova[CPT_EC_ID_PMAX];
/* Forward declarations */
rte_mempool_put(sess_mp, priv);
}
-static __rte_always_inline int32_t __rte_hot
+static __rte_always_inline void * __rte_hot
otx_cpt_request_enqueue(struct cpt_instance *instance,
struct pending_queue *pqueue,
void *req, uint64_t cpt_inst_w7)
{
struct cpt_request_info *user_req = (struct cpt_request_info *)req;
- if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
- return -EAGAIN;
+ if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN)) {
+ rte_errno = EAGAIN;
+ return NULL;
+ }
fill_cpt_inst(instance, req, cpt_inst_w7);
/* Default mode of software queue */
mark_cpt_inst(instance);
- pqueue->req_queue[pqueue->enq_tail] = (uintptr_t)user_req;
-
- /* We will use soft queue length here to limit requests */
- MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
- pqueue->pending_count += 1;
-
CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p "
"op: %p", user_req, user_req->op);
- return 0;
+ return req;
}
-static __rte_always_inline int __rte_hot
+static __rte_always_inline void * __rte_hot
otx_cpt_enq_single_asym(struct cpt_instance *instance,
struct rte_crypto_op *op,
struct pending_queue *pqueue)
struct cpt_asym_sess_misc *sess;
uintptr_t *cop;
void *mdata;
+ void *req;
int ret;
if (unlikely(rte_mempool_get(minfo->pool, &mdata) < 0)) {
CPT_LOG_DP_ERR("Could not allocate meta buffer for request");
- return -ENOMEM;
+ rte_errno = ENOMEM;
+ return NULL;
}
sess = get_asym_session_private_data(asym_op->session,
default:
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- ret = -EINVAL;
+ rte_errno = EINVAL;
goto req_fail;
}
- ret = otx_cpt_request_enqueue(instance, pqueue, params.req,
+ req = otx_cpt_request_enqueue(instance, pqueue, params.req,
sess->cpt_inst_w7);
-
- if (unlikely(ret)) {
+ if (unlikely(req == NULL)) {
CPT_LOG_DP_ERR("Could not enqueue crypto req");
goto req_fail;
}
- return 0;
+ return req;
req_fail:
free_op_meta(mdata, minfo->pool);
- return ret;
+ return NULL;
}
-static __rte_always_inline int __rte_hot
+static __rte_always_inline void * __rte_hot
otx_cpt_enq_single_sym(struct cpt_instance *instance,
struct rte_crypto_op *op,
struct pending_queue *pqueue)
struct cpt_request_info *prep_req;
void *mdata = NULL;
int ret = 0;
+ void *req;
uint64_t cpt_op;
sess = (struct cpt_sess_misc *)
if (unlikely(ret)) {
CPT_LOG_DP_ERR("prep cryto req : op %p, cpt_op 0x%x "
"ret 0x%x", op, (unsigned int)cpt_op, ret);
- return ret;
+ return NULL;
}
/* Enqueue prepared instruction to h/w */
- ret = otx_cpt_request_enqueue(instance, pqueue, prep_req,
+ req = otx_cpt_request_enqueue(instance, pqueue, prep_req,
sess->cpt_inst_w7);
-
- if (unlikely(ret)) {
+ if (unlikely(req == NULL))
/* Buffer allocated for request preparation needs to be freed */
free_op_meta(mdata, instance->meta_info.pool);
- return ret;
- }
- return 0;
+ return req;
}
-static __rte_always_inline int __rte_hot
+static __rte_always_inline void * __rte_hot
otx_cpt_enq_single_sym_sessless(struct cpt_instance *instance,
struct rte_crypto_op *op,
struct pending_queue *pend_q)
const int driver_id = otx_cryptodev_driver_id;
struct rte_crypto_sym_op *sym_op = op->sym;
struct rte_cryptodev_sym_session *sess;
+ void *req;
int ret;
/* Create temporary session */
sess = rte_cryptodev_sym_session_create(instance->sess_mp);
- if (sess == NULL)
- return -ENOMEM;
+ if (sess == NULL) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
ret = sym_session_configure(driver_id, sym_op->xform, sess,
instance->sess_mp_priv);
sym_op->session = sess;
- ret = otx_cpt_enq_single_sym(instance, op, pend_q);
+ req = otx_cpt_enq_single_sym(instance, op, pend_q);
- if (unlikely(ret))
+ if (unlikely(req == NULL))
goto priv_put;
- return 0;
+ return req;
priv_put:
sym_session_clear(driver_id, sess);
sess_put:
rte_mempool_put(instance->sess_mp, sess);
- return ret;
+ return NULL;
}
#define OP_TYPE_SYM 0
#define OP_TYPE_ASYM 1
-static __rte_always_inline int __rte_hot
+static __rte_always_inline void * __rte_hot
otx_cpt_enq_single(struct cpt_instance *inst,
struct rte_crypto_op *op,
struct pending_queue *pqueue,
}
/* Should not reach here */
- return -ENOTSUP;
+ rte_errno = ENOTSUP;
+ return NULL;
}
static __rte_always_inline uint16_t __rte_hot
{
struct cpt_instance *instance = (struct cpt_instance *)qptr;
uint16_t count;
- int ret;
+ void *req;
struct cpt_vf *cptvf = (struct cpt_vf *)instance;
struct pending_queue *pqueue = &cptvf->pqueue;
while (likely(count < nb_ops)) {
/* Enqueue single op */
- ret = otx_cpt_enq_single(instance, ops[count], pqueue, op_type);
+ req = otx_cpt_enq_single(instance, ops[count], pqueue, op_type);
- if (unlikely(ret))
+ if (unlikely(req == NULL))
break;
+
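+		/*
+		 * Record the accepted request in the software pending
+		 * queue; a slot is consumed only on successful enqueue.
+		 */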
+ pqueue->req_queue[pqueue->enq_tail] = (uintptr_t)req;
+ MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
+ pqueue->pending_count += 1;
count++;
}
otx_cpt_ring_dbell(instance, count);
return otx_cpt_pkt_enqueue(qptr, ops, nb_ops, OP_TYPE_SYM);
}
+static __rte_always_inline void
+submit_request_to_sso(struct ssows *ws, uintptr_t req,
+ struct rte_event *rsp_info)
+{
+ uint64_t add_work;
+
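+	/*
+	 * Compose the SSO add-work word: flow id in the low bits, event
+	 * type CRYPTODEV in bits [31:28] and scheduling type in bits
+	 * [33:32], matching the shifts below.
+	 */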
+ add_work = rsp_info->flow_id | (RTE_EVENT_TYPE_CRYPTODEV << 28) |
+ ((uint64_t)(rsp_info->sched_type) << 32);
+
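+	/*
+	 * sched_type 0 is RTE_SCHED_TYPE_ORDERED: wait for this workslot
+	 * to reach the head of its ordered context before adding work.
+	 */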
+ if (!rsp_info->sched_type)
+ ssows_head_wait(ws);
+
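+	/* Ensure the request is fully written before the add-work store. */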
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ ssovf_store_pair(add_work, req, ws->grps[rsp_info->queue_id]);
+}
+
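+/*
+ * The application supplies rte_event_crypto_metadata either as session
+ * user data (session-based ops) or within the op private data area
+ * (sessionless ops with a valid private_data_offset).
+ */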
+static inline union rte_event_crypto_metadata *
+get_event_crypto_mdata(struct rte_crypto_op *op)
+{
+ union rte_event_crypto_metadata *ec_mdata;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+ ec_mdata = rte_cryptodev_sym_session_get_user_data(
+ op->sym->session);
+ else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
+ op->private_data_offset)
+ ec_mdata = (union rte_event_crypto_metadata *)
+ ((uint8_t *)op + op->private_data_offset);
+ else
+ return NULL;
+
+ return ec_mdata;
+}
+
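+/*
+ * Datapath entry used by the event crypto adapter: enqueue the op to
+ * CPT and pass the request handle to the SSO so that the completion
+ * reaches the application as a crypto event.
+ */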
+uint16_t __rte_hot
+otx_crypto_adapter_enqueue(void *port, struct rte_crypto_op *op)
+{
+ union rte_event_crypto_metadata *ec_mdata;
+ struct cpt_instance *instance;
+ struct cpt_request_info *req;
+ struct rte_event *rsp_info;
+ uint8_t op_type, cdev_id;
+ uint16_t qp_id;
+
+ ec_mdata = get_event_crypto_mdata(op);
+ if (unlikely(ec_mdata == NULL)) {
+ rte_errno = EINVAL;
+ return 0;
+ }
+
+ cdev_id = ec_mdata->request_info.cdev_id;
+ qp_id = ec_mdata->request_info.queue_pair_id;
+ rsp_info = &ec_mdata->response_info;
+ instance = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
+
+ if (unlikely(!instance->ca_enabled)) {
+ rte_errno = EINVAL;
+ return 0;
+ }
+
+ op_type = op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC ? OP_TYPE_SYM :
+ OP_TYPE_ASYM;
+ req = otx_cpt_enq_single(instance, op,
+ &((struct cpt_vf *)instance)->pqueue, op_type);
+ if (unlikely(req == NULL))
+ return 0;
+
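+	/*
+	 * Kick the doorbell for this single instruction and stash the
+	 * queue pair so the dequeue side can poll for completion.
+	 */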
+ otx_cpt_ring_dbell(instance, 1);
+ req->qp = instance;
+ submit_request_to_sso(port, (uintptr_t)req, rsp_info);
+
+ return 1;
+}
+
static inline void
otx_cpt_asym_rsa_op(struct rte_crypto_op *cop, struct cpt_request_info *req,
struct rte_crypto_rsa_xform *rsa_ctx)
return;
}
+static inline void
+free_sym_session_data(const struct cpt_instance *instance,
+ struct rte_crypto_op *cop)
+{
+ void *sess_private_data_t = get_sym_session_private_data(
+ cop->sym->session, otx_cryptodev_driver_id);
+ memset(sess_private_data_t, 0, cpt_get_session_size());
+ memset(cop->sym->session, 0,
+ rte_cryptodev_sym_get_existing_header_session_size(
+ cop->sym->session));
+ rte_mempool_put(instance->sess_mp_priv, sess_private_data_t);
+ rte_mempool_put(instance->sess_mp, cop->sym->session);
+ cop->sym->session = NULL;
+}
+
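+/*
+ * Completion handling factored out of otx_cpt_pkt_dequeue() so the
+ * poll-mode and event adapter dequeue paths can share it.
+ */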
+static __rte_always_inline struct rte_crypto_op *
+otx_cpt_process_response(const struct cpt_instance *instance, uintptr_t *rsp,
+ uint8_t cc, const uint8_t op_type)
+{
+ struct rte_crypto_op *cop;
+ void *metabuf;
+
+ metabuf = (void *)rsp[0];
+ cop = (void *)rsp[1];
+
+ /* Check completion code */
+ if (likely(cc == 0)) {
+ /* H/w success pkt. Post process */
+ otx_cpt_dequeue_post_process(cop, rsp, op_type);
+ } else if (cc == ERR_GC_ICV_MISCOMPARE) {
+ /* auth data mismatch */
+ cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ /* Error */
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+
+ if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS))
+ free_sym_session_data(instance, cop);
+ free_op_meta(metabuf, instance->meta_info.pool);
+
+ return cop;
+}
+
static __rte_always_inline uint16_t __rte_hot
otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
const uint8_t op_type)
uint8_t ret;
int nb_completed;
struct pending_queue *pqueue = &cptvf->pqueue;
- struct rte_crypto_op *cop;
- void *metabuf;
- uintptr_t *rsp;
pcount = pqueue->pending_count;
count = (nb_ops > pcount) ? pcount : nb_ops;
nb_completed = i;
for (i = 0; i < nb_completed; i++) {
-
- rsp = (void *)ops[i];
-
if (likely((i + 1) < nb_completed))
rte_prefetch0(ops[i+1]);
- metabuf = (void *)rsp[0];
- cop = (void *)rsp[1];
-
- ops[i] = cop;
-
- /* Check completion code */
-
- if (likely(cc[i] == 0)) {
- /* H/w success pkt. Post process */
- otx_cpt_dequeue_post_process(cop, rsp, op_type);
- } else if (cc[i] == ERR_GC_ICV_MISCOMPARE) {
- /* auth data mismatch */
- cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- /* Error */
- cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
- }
-
- if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
- void *sess_private_data_t =
- get_sym_session_private_data(cop->sym->session,
- otx_cryptodev_driver_id);
- memset(sess_private_data_t, 0,
- cpt_get_session_size());
- memset(cop->sym->session, 0,
- rte_cryptodev_sym_get_existing_header_session_size(
- cop->sym->session));
- rte_mempool_put(instance->sess_mp_priv,
- sess_private_data_t);
- rte_mempool_put(instance->sess_mp, cop->sym->session);
- cop->sym->session = NULL;
- }
- free_op_meta(metabuf, instance->meta_info.pool);
+ ops[i] = otx_cpt_process_response(instance, (void *)ops[i],
+ cc[i], op_type);
}
return nb_completed;
return otx_cpt_pkt_dequeue(qptr, ops, nb_ops, OP_TYPE_SYM);
}
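+/*
+ * Event adapter dequeue hook: get_work1 carries the request pointer
+ * that was handed to the SSO at enqueue time.
+ */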
+uintptr_t __rte_hot
+otx_crypto_adapter_dequeue(uintptr_t get_work1)
+{
+ const struct cpt_instance *instance;
+ struct cpt_request_info *req;
+ struct rte_crypto_op *cop;
+ uint8_t cc, op_type;
+ uintptr_t *rsp;
+
+ req = (struct cpt_request_info *)get_work1;
+ instance = req->qp;
+ rsp = req->op;
+ cop = (void *)rsp[1];
+ op_type = cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC ? OP_TYPE_SYM :
+ OP_TYPE_ASYM;
+
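+	/*
+	 * The event can be delivered before the CPT instruction
+	 * completes, so poll the completion code until the hardware
+	 * is done.
+	 */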
+ do {
+ cc = check_nb_command_id(
+ req, (struct cpt_instance *)(uintptr_t)instance);
+ } while (cc == ERR_REQ_PENDING);
+
+ cop = otx_cpt_process_response(instance, (void *)req->op, cc, op_type);
+
+ return (uintptr_t)(cop);
+}
+
static struct rte_cryptodev_ops cptvf_ops = {
/* Device related operations */
.dev_configure = otx_cpt_dev_config,