#include <rte_alarm.h>
#include <rte_bus_pci.h>
#include <rte_cryptodev.h>
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
+#include <rte_eventdev.h>
+#include <rte_event_crypto_adapter.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include "cpt_ucode.h"
#include "cpt_ucode_asym.h"
+#include "ssovf_worker.h"
+
static uint64_t otx_fpm_iova[CPT_EC_ID_PMAX];
/* Forward declarations */
if (xform->next) {
if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+ (xform->auth.algo != RTE_CRYPTO_AUTH_SHA1_HMAC ||
+ xform->next->cipher.algo != RTE_CRYPTO_CIPHER_AES_CBC))
return -ENOTSUP;
if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ (xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_CBC ||
+ xform->next->auth.algo != RTE_CRYPTO_AUTH_SHA1_HMAC))
return -ENOTSUP;
if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *pool)
{
+ struct rte_crypto_sym_xform *temp_xform = xform;
struct cpt_sess_misc *misc;
+ vq_cmd_word3_t vq_cmd_w3;
void *priv;
int ret;
}
memset(priv, 0, sizeof(struct cpt_sess_misc) +
- offsetof(struct cpt_ctx, fctx));
+ offsetof(struct cpt_ctx, mc_ctx));
misc = priv;
goto priv_put;
}
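+	/* Reject unsupported MAC lengths; drop any auth key that was
+	 * allocated while filling the session before bailing out.
+	 */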
+ if ((GET_SESS_FC_TYPE(misc) == HASH_HMAC) &&
+ cpt_mac_len_verify(&temp_xform->auth)) {
+ CPT_LOG_ERR("MAC length is not supported");
+ struct cpt_ctx *ctx = SESS_PRIV(misc);
+ if (ctx->auth_key != NULL) {
+ rte_free(ctx->auth_key);
+ ctx->auth_key = NULL;
+ }
+ ret = -ENOTSUP;
+ goto priv_put;
+ }
+
set_sym_session_private_data(sess, driver_id, priv);
misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
sizeof(struct cpt_sess_misc);
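+	/* Cache CPT instruction word 7 (engine group + microcode context
+	 * pointer) in the session so the datapath need not rebuild it per op.
+	 */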
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.grp = 0;
+ vq_cmd_w3.s.cptr = misc->ctx_dma_addr + offsetof(struct cpt_ctx,
+ mc_ctx);
+
+ misc->cpt_inst_w7 = vq_cmd_w3.u64;
+
return 0;
priv_put:
sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
{
void *priv = get_sym_session_private_data(sess, driver_id);
+ struct cpt_sess_misc *misc;
struct rte_mempool *pool;
+ struct cpt_ctx *ctx;
if (priv == NULL)
return;
+ misc = priv;
+ ctx = SESS_PRIV(misc);
+
+ rte_free(ctx->auth_key);
+
memset(priv, 0, cpt_get_session_size());
pool = rte_mempool_from_obj(priv);
}
static int
-otx_cpt_asym_session_cfg(struct rte_cryptodev *dev,
+otx_cpt_asym_session_cfg(struct rte_cryptodev *dev __rte_unused,
struct rte_crypto_asym_xform *xform __rte_unused,
- struct rte_cryptodev_asym_session *sess,
- struct rte_mempool *pool)
+ struct rte_cryptodev_asym_session *sess)
{
- struct cpt_asym_sess_misc *priv;
+ struct cpt_asym_sess_misc *priv = (struct cpt_asym_sess_misc *)
+ sess->sess_private_data;
int ret;
CPT_PMD_INIT_FUNC_TRACE();
- if (rte_mempool_get(pool, (void **)&priv)) {
- CPT_LOG_ERR("Could not allocate session private data");
- return -ENOMEM;
- }
-
- memset(priv, 0, sizeof(struct cpt_asym_sess_misc));
-
ret = cpt_fill_asym_session_parameters(priv, xform);
if (ret) {
CPT_LOG_ERR("Could not configure session parameters");
-
- /* Return session to mempool */
- rte_mempool_put(pool, priv);
return ret;
}
- set_asym_session_private_data(sess, dev->driver_id, priv);
+ priv->cpt_inst_w7 = 0;
+
return 0;
}
struct rte_cryptodev_asym_session *sess)
{
struct cpt_asym_sess_misc *priv;
- struct rte_mempool *sess_mp;
CPT_PMD_INIT_FUNC_TRACE();
- priv = get_asym_session_private_data(sess, dev->driver_id);
+ priv = (struct cpt_asym_sess_misc *) sess->sess_private_data;
if (priv == NULL)
return;
/* Free resources allocated during session configure */
cpt_free_asym_session_parameters(priv);
memset(priv, 0, otx_cpt_asym_session_size_get(dev));
- sess_mp = rte_mempool_from_obj(priv);
- set_asym_session_private_data(sess, dev->driver_id, NULL);
- rte_mempool_put(sess_mp, priv);
}
-static __rte_always_inline int32_t __rte_hot
+static __rte_always_inline void * __rte_hot
otx_cpt_request_enqueue(struct cpt_instance *instance,
- struct pending_queue *pqueue,
- void *req)
+ void *req, uint64_t cpt_inst_w7)
{
struct cpt_request_info *user_req = (struct cpt_request_info *)req;
- if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
- return -EAGAIN;
-
- fill_cpt_inst(instance, req);
+ fill_cpt_inst(instance, req, cpt_inst_w7);
CPT_LOG_DP_DEBUG("req: %p op: %p ", req, user_req->op);
/* Default mode of software queue */
mark_cpt_inst(instance);
- pqueue->rid_queue[pqueue->enq_tail].rid = (uintptr_t)user_req;
-
- /* We will use soft queue length here to limit requests */
- MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
- pqueue->pending_count += 1;
-
CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p "
"op: %p", user_req, user_req->op);
- return 0;
+ return req;
}
-static __rte_always_inline int __rte_hot
+static __rte_always_inline void * __rte_hot
otx_cpt_enq_single_asym(struct cpt_instance *instance,
- struct rte_crypto_op *op,
- struct pending_queue *pqueue)
+ struct rte_crypto_op *op)
{
struct cpt_qp_meta_info *minfo = &instance->meta_info;
struct rte_crypto_asym_op *asym_op = op->asym;
struct cpt_asym_sess_misc *sess;
uintptr_t *cop;
void *mdata;
+ void *req;
int ret;
if (unlikely(rte_mempool_get(minfo->pool, &mdata) < 0)) {
CPT_LOG_DP_ERR("Could not allocate meta buffer for request");
- return -ENOMEM;
+ rte_errno = ENOMEM;
+ return NULL;
}
- sess = get_asym_session_private_data(asym_op->session,
- otx_cryptodev_driver_id);
+ sess = (struct cpt_asym_sess_misc *)
+ asym_op->session->sess_private_data;
/* Store phys_addr of the mdata to meta_buf */
params.meta_buf = rte_mempool_virt2iova(mdata);
default:
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- ret = -EINVAL;
+ rte_errno = EINVAL;
goto req_fail;
}
- ret = otx_cpt_request_enqueue(instance, pqueue, params.req);
-
- if (unlikely(ret)) {
+ req = otx_cpt_request_enqueue(instance, params.req, sess->cpt_inst_w7);
+ if (unlikely(req == NULL)) {
CPT_LOG_DP_ERR("Could not enqueue crypto req");
goto req_fail;
}
- return 0;
+ return req;
req_fail:
free_op_meta(mdata, minfo->pool);
- return ret;
+ return NULL;
}
-static __rte_always_inline int __rte_hot
+static __rte_always_inline void * __rte_hot
otx_cpt_enq_single_sym(struct cpt_instance *instance,
- struct rte_crypto_op *op,
- struct pending_queue *pqueue)
+ struct rte_crypto_op *op)
{
struct cpt_sess_misc *sess;
struct rte_crypto_sym_op *sym_op = op->sym;
- void *prep_req, *mdata = NULL;
+ struct cpt_request_info *prep_req;
+ void *mdata = NULL;
int ret = 0;
+ void *req;
uint64_t cpt_op;
sess = (struct cpt_sess_misc *)
if (likely(cpt_op & CPT_OP_CIPHER_MASK))
ret = fill_fc_params(op, sess, &instance->meta_info, &mdata,
- &prep_req);
+ (void **)&prep_req);
else
ret = fill_digest_params(op, sess, &instance->meta_info,
- &mdata, &prep_req);
+ &mdata, (void **)&prep_req);
if (unlikely(ret)) {
- CPT_LOG_DP_ERR("prep cryto req : op %p, cpt_op 0x%x "
+ CPT_LOG_DP_ERR("prep crypto req : op %p, cpt_op 0x%x "
"ret 0x%x", op, (unsigned int)cpt_op, ret);
- return ret;
+ return NULL;
}
/* Enqueue prepared instruction to h/w */
- ret = otx_cpt_request_enqueue(instance, pqueue, prep_req);
-
- if (unlikely(ret)) {
+ req = otx_cpt_request_enqueue(instance, prep_req, sess->cpt_inst_w7);
+ if (unlikely(req == NULL))
		/* Buffer allocated for request preparation needs to be freed */
free_op_meta(mdata, instance->meta_info.pool);
- return ret;
- }
- return 0;
+ return req;
}
-static __rte_always_inline int __rte_hot
+static __rte_always_inline void * __rte_hot
otx_cpt_enq_single_sym_sessless(struct cpt_instance *instance,
- struct rte_crypto_op *op,
- struct pending_queue *pend_q)
+ struct rte_crypto_op *op)
{
const int driver_id = otx_cryptodev_driver_id;
struct rte_crypto_sym_op *sym_op = op->sym;
struct rte_cryptodev_sym_session *sess;
+ void *req;
int ret;
/* Create temporary session */
-
- if (rte_mempool_get(instance->sess_mp, (void **)&sess))
- return -ENOMEM;
+ sess = rte_cryptodev_sym_session_create(instance->sess_mp);
+ if (sess == NULL) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
ret = sym_session_configure(driver_id, sym_op->xform, sess,
instance->sess_mp_priv);
sym_op->session = sess;
- ret = otx_cpt_enq_single_sym(instance, op, pend_q);
-
- if (unlikely(ret))
+ /* Enqueue op with the tmp session set */
+ req = otx_cpt_enq_single_sym(instance, op);
+ if (unlikely(req == NULL))
goto priv_put;
- return 0;
+ return req;
priv_put:
sym_session_clear(driver_id, sess);
sess_put:
rte_mempool_put(instance->sess_mp, sess);
- return ret;
+ return NULL;
}
#define OP_TYPE_SYM 0
#define OP_TYPE_ASYM 1
-static __rte_always_inline int __rte_hot
+static __rte_always_inline void *__rte_hot
otx_cpt_enq_single(struct cpt_instance *inst,
struct rte_crypto_op *op,
- struct pending_queue *pqueue,
const uint8_t op_type)
{
/* Check for the type */
if (op_type == OP_TYPE_SYM) {
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
- return otx_cpt_enq_single_sym(inst, op, pqueue);
+ return otx_cpt_enq_single_sym(inst, op);
else
- return otx_cpt_enq_single_sym_sessless(inst, op,
- pqueue);
+ return otx_cpt_enq_single_sym_sessless(inst, op);
}
if (op_type == OP_TYPE_ASYM) {
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
- return otx_cpt_enq_single_asym(inst, op, pqueue);
+ return otx_cpt_enq_single_asym(inst, op);
}
/* Should not reach here */
- return -ENOTSUP;
+ rte_errno = ENOTSUP;
+ return NULL;
}
static __rte_always_inline uint16_t __rte_hot
const uint8_t op_type)
{
struct cpt_instance *instance = (struct cpt_instance *)qptr;
- uint16_t count;
- int ret;
+ uint16_t count, free_slots;
+ void *req;
struct cpt_vf *cptvf = (struct cpt_vf *)instance;
struct pending_queue *pqueue = &cptvf->pqueue;
- count = DEFAULT_CMD_QLEN - pqueue->pending_count;
- if (nb_ops > count)
- nb_ops = count;
+ free_slots = pending_queue_free_slots(pqueue, DEFAULT_CMD_QLEN,
+ DEFAULT_CMD_QRSVD_SLOTS);
+ if (nb_ops > free_slots)
+ nb_ops = free_slots;
count = 0;
while (likely(count < nb_ops)) {
/* Enqueue single op */
- ret = otx_cpt_enq_single(instance, ops[count], pqueue, op_type);
+ req = otx_cpt_enq_single(instance, ops[count], op_type);
- if (unlikely(ret))
+ if (unlikely(req == NULL))
break;
+
+ pending_queue_push(pqueue, req, count, DEFAULT_CMD_QLEN);
count++;
}
- otx_cpt_ring_dbell(instance, count);
+
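+	/* Commit the pushed requests and ring the doorbell once for the
+	 * whole burst.
+	 */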
+ if (likely(count)) {
+ pending_queue_commit(pqueue, count, DEFAULT_CMD_QLEN);
+ otx_cpt_ring_dbell(instance, count);
+ }
return count;
}
return otx_cpt_pkt_enqueue(qptr, ops, nb_ops, OP_TYPE_SYM);
}
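+/*
+ * Forward a submitted CPT request to the SSO event device: build the
+ * add_work word from the response event info (flow id, CRYPTODEV event type,
+ * scheduling type) and store it with the request pointer to the destination
+ * event group. Ordered response events (sched_type 0) wait for the SSO head
+ * before the work is added.
+ */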
+static __rte_always_inline void
+submit_request_to_sso(struct ssows *ws, uintptr_t req,
+ struct rte_event *rsp_info)
+{
+ uint64_t add_work;
+
+ add_work = rsp_info->flow_id | (RTE_EVENT_TYPE_CRYPTODEV << 28) |
+ ((uint64_t)(rsp_info->sched_type) << 32);
+
+ if (!rsp_info->sched_type)
+ ssows_head_wait(ws);
+
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ ssovf_store_pair(add_work, req, ws->grps[rsp_info->queue_id]);
+}
+
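+/*
+ * Event crypto adapter enqueue: look up the target cryptodev queue pair from
+ * the op's event metadata, submit the request to CPT and post it to the SSO
+ * so that the completion is delivered as an event.
+ */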
+uint16_t __rte_hot
+otx_crypto_adapter_enqueue(void *port, struct rte_crypto_op *op)
+{
+ union rte_event_crypto_metadata *ec_mdata;
+ struct cpt_instance *instance;
+ struct cpt_request_info *req;
+ struct rte_event *rsp_info;
+ uint8_t op_type, cdev_id;
+ uint16_t qp_id;
+
+ ec_mdata = rte_cryptodev_session_event_mdata_get(op);
+ if (unlikely(ec_mdata == NULL)) {
+ rte_errno = EINVAL;
+ return 0;
+ }
+
+ cdev_id = ec_mdata->request_info.cdev_id;
+ qp_id = ec_mdata->request_info.queue_pair_id;
+ rsp_info = &ec_mdata->response_info;
+ instance = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
+
+ if (unlikely(!instance->ca_enabled)) {
+ rte_errno = EINVAL;
+ return 0;
+ }
+
+ op_type = op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC ? OP_TYPE_SYM :
+ OP_TYPE_ASYM;
+ req = otx_cpt_enq_single(instance, op, op_type);
+ if (unlikely(req == NULL))
+ return 0;
+
+ otx_cpt_ring_dbell(instance, 1);
+ req->qp = instance;
+ submit_request_to_sso(port, (uintptr_t)req, rsp_info);
+
+ return 1;
+}
+
static inline void
otx_cpt_asym_rsa_op(struct rte_crypto_op *cop, struct cpt_request_info *req,
struct rte_crypto_rsa_xform *rsa_ctx)
memcpy(rsa->cipher.data, req->rptr, rsa->cipher.length);
break;
case RTE_CRYPTO_ASYM_OP_DECRYPT:
- if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE)
+ if (rsa->padding.type == RTE_CRYPTO_RSA_PADDING_NONE)
rsa->message.length = rsa_ctx->n.length;
else {
/* Get length of decrypted output */
memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
break;
case RTE_CRYPTO_ASYM_OP_VERIFY:
- if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE)
+ if (rsa->padding.type == RTE_CRYPTO_RSA_PADDING_NONE)
rsa->sign.length = rsa_ctx->n.length;
else {
/* Get length of decrypted output */
/* Separate out sign r and s components */
memcpy(ecdsa->r.data, req->rptr, prime_len);
- memcpy(ecdsa->s.data, req->rptr + ROUNDUP8(prime_len), prime_len);
+ memcpy(ecdsa->s.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
+ prime_len);
ecdsa->r.length = prime_len;
ecdsa->s.length = prime_len;
}
int prime_len = ec_grp[ec->curveid].prime.length;
memcpy(ecpm->r.x.data, req->rptr, prime_len);
- memcpy(ecpm->r.y.data, req->rptr + ROUNDUP8(prime_len), prime_len);
+ memcpy(ecpm->r.y.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
+ prime_len);
ecpm->r.x.length = prime_len;
ecpm->r.y.length = prime_len;
}
struct rte_crypto_asym_op *op = cop->asym;
struct cpt_asym_sess_misc *sess;
- sess = get_asym_session_private_data(op->session,
- otx_cryptodev_driver_id);
+ sess = (struct cpt_asym_sess_misc *) op->session->sess_private_data;
switch (sess->xfrm_type) {
case RTE_CRYPTO_ASYM_XFORM_RSA:
return;
}
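+/* Return the temporary session created for a sessionless op back to its
+ * mempools.
+ */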
+static inline void
+free_sym_session_data(const struct cpt_instance *instance,
+ struct rte_crypto_op *cop)
+{
+ void *sess_private_data_t = get_sym_session_private_data(
+ cop->sym->session, otx_cryptodev_driver_id);
+ memset(sess_private_data_t, 0, cpt_get_session_size());
+ memset(cop->sym->session, 0,
+ rte_cryptodev_sym_get_existing_header_session_size(
+ cop->sym->session));
+ rte_mempool_put(instance->sess_mp_priv, sess_private_data_t);
+ rte_mempool_put(instance->sess_mp, cop->sym->session);
+ cop->sym->session = NULL;
+}
+
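+/*
+ * Common completion handling shared by the poll-mode dequeue and the event
+ * adapter dequeue paths: post-process successful ops, flag ICV miscompares
+ * as auth failures, and release sessionless state and the meta buffer.
+ */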
+static __rte_always_inline struct rte_crypto_op *
+otx_cpt_process_response(const struct cpt_instance *instance, uintptr_t *rsp,
+ uint8_t cc, const uint8_t op_type)
+{
+ struct rte_crypto_op *cop;
+ void *metabuf;
+
+ metabuf = (void *)rsp[0];
+ cop = (void *)rsp[1];
+
+ /* Check completion code */
+ if (likely(cc == 0)) {
+ /* H/w success pkt. Post process */
+ otx_cpt_dequeue_post_process(cop, rsp, op_type);
+ } else if (cc == ERR_GC_ICV_MISCOMPARE) {
+ /* auth data mismatch */
+ cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ /* Error */
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+
+ if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS))
+ free_sym_session_data(instance, cop);
+ free_op_meta(metabuf, instance->meta_info.pool);
+
+ return cop;
+}
+
static __rte_always_inline uint16_t __rte_hot
otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
const uint8_t op_type)
struct cpt_instance *instance = (struct cpt_instance *)qptr;
struct cpt_request_info *user_req;
struct cpt_vf *cptvf = (struct cpt_vf *)instance;
- struct rid *rid_e;
uint8_t cc[nb_ops];
int i, count, pcount;
uint8_t ret;
int nb_completed;
struct pending_queue *pqueue = &cptvf->pqueue;
- struct rte_crypto_op *cop;
- void *metabuf;
- uintptr_t *rsp;
- pcount = pqueue->pending_count;
+ pcount = pending_queue_level(pqueue, DEFAULT_CMD_QLEN);
+
+ /* Ensure pcount isn't read before data lands */
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+
count = (nb_ops > pcount) ? pcount : nb_ops;
for (i = 0; i < count; i++) {
- rid_e = &pqueue->rid_queue[pqueue->deq_head];
- user_req = (struct cpt_request_info *)(rid_e->rid);
-
- if (likely((i+1) < count))
- rte_prefetch_non_temporal((void *)rid_e[1].rid);
+ pending_queue_peek(pqueue, (void **) &user_req,
+ DEFAULT_CMD_QLEN, i + 1 < count);
ret = check_nb_command_id(user_req, instance);
CPT_LOG_DP_DEBUG("Request %p Op %p completed with code %d",
user_req, user_req->op, ret);
- MOD_INC(pqueue->deq_head, DEFAULT_CMD_QLEN);
- pqueue->pending_count -= 1;
+ pending_queue_pop(pqueue, DEFAULT_CMD_QLEN);
}
nb_completed = i;
for (i = 0; i < nb_completed; i++) {
-
- rsp = (void *)ops[i];
-
if (likely((i + 1) < nb_completed))
rte_prefetch0(ops[i+1]);
- metabuf = (void *)rsp[0];
- cop = (void *)rsp[1];
-
- ops[i] = cop;
-
- /* Check completion code */
-
- if (likely(cc[i] == 0)) {
- /* H/w success pkt. Post process */
- otx_cpt_dequeue_post_process(cop, rsp, op_type);
- } else if (cc[i] == ERR_GC_ICV_MISCOMPARE) {
- /* auth data mismatch */
- cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- /* Error */
- cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
- }
-
- if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
- void *sess_private_data_t =
- get_sym_session_private_data(cop->sym->session,
- otx_cryptodev_driver_id);
- memset(sess_private_data_t, 0,
- cpt_get_session_size());
- memset(cop->sym->session, 0,
- rte_cryptodev_sym_get_existing_header_session_size(
- cop->sym->session));
- rte_mempool_put(instance->sess_mp_priv,
- sess_private_data_t);
- rte_mempool_put(instance->sess_mp, cop->sym->session);
- cop->sym->session = NULL;
- }
- free_op_meta(metabuf, instance->meta_info.pool);
+ ops[i] = otx_cpt_process_response(instance, (void *)ops[i],
+ cc[i], op_type);
}
return nb_completed;
return otx_cpt_pkt_dequeue(qptr, ops, nb_ops, OP_TYPE_SYM);
}
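+/*
+ * Event crypto adapter dequeue: the work word carries the request pointer.
+ * Poll until the completion code leaves ERR_REQ_PENDING, then run the common
+ * response processing and return the crypto op.
+ */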
+uintptr_t __rte_hot
+otx_crypto_adapter_dequeue(uintptr_t get_work1)
+{
+ const struct cpt_instance *instance;
+ struct cpt_request_info *req;
+ struct rte_crypto_op *cop;
+ uint8_t cc, op_type;
+ uintptr_t *rsp;
+
+ req = (struct cpt_request_info *)get_work1;
+ instance = req->qp;
+ rsp = req->op;
+ cop = (void *)rsp[1];
+ op_type = cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC ? OP_TYPE_SYM :
+ OP_TYPE_ASYM;
+
+ do {
+ cc = check_nb_command_id(
+ req, (struct cpt_instance *)(uintptr_t)instance);
+ } while (cc == ERR_REQ_PENDING);
+
+ cop = otx_cpt_process_response(instance, (void *)req->op, cc, op_type);
+
+ return (uintptr_t)(cop);
+}
+
static struct rte_cryptodev_ops cptvf_ops = {
/* Device related operations */
.dev_configure = otx_cpt_dev_config,
RTE_CRYPTODEV_FF_HW_ACCELERATED |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
- RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
- RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
+ RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
+ RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
break;
default:
/* Feature not supported. Abort */