uint64_t ei3;
} ist;
uint8_t *rptr;
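+	/** qp on which the request was enqueued; used when post
+	 * processing the completion on the event dequeue path
+	 */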
+ const struct otx2_cpt_qp *qp;
/** Control path fields */
uint64_t time_out;
return -ENOTSUP;
}
+static __rte_always_inline void __rte_hot
+otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
+ struct cpt_request_info *req,
+ void *lmtline)
+{
+ union cpt_inst_s inst;
+ uint64_t lmt_status;
+
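+	/* Form the CPT instruction: zero the unused words and set
+	 * the completion (result) address for this request.
+	 */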
+ inst.u[0] = 0;
+ inst.s9x.res_addr = req->comp_baddr;
+ inst.u[2] = 0;
+ inst.u[3] = 0;
+
+ inst.s9x.ei0 = req->ist.ei0;
+ inst.s9x.ei1 = req->ist.ei1;
+ inst.s9x.ei2 = req->ist.ei2;
+ inst.s9x.ei3 = req->ist.ei3;
+
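+	/* Set the SSO event attributes from the qp's event config so
+	 * that the completion is enqueued to the event device.
+	 */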
+ inst.s9x.qord = 1;
+ inst.s9x.grp = qp->ev.queue_id;
+ inst.s9x.tt = qp->ev.sched_type;
+ inst.s9x.tag = (RTE_EVENT_TYPE_CRYPTODEV << 28) |
+ qp->ev.flow_id;
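+	/* The request itself is the work queue entry: only bits
+	 * [63:3] of the pointer are stored in wq_ptr. Save the qp in
+	 * the request for post processing at event dequeue.
+	 */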
+ inst.s9x.wq_ptr = (uint64_t)req >> 3;
+ req->qp = qp;
+
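+	/* Retry the LMTST until the submission is accepted */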
+ do {
+ /* Copy CPT command to LMTLINE */
+ memcpy(lmtline, &inst, sizeof(inst));
+
+		/*
+		 * Make sure the compiler does not reorder the memcpy and
+		 * ldeor. LMTST transactions are always flushed from the
+		 * write buffer immediately, so a DMB is not required to
+		 * push out LMTSTs.
+		 */
+ rte_io_wmb();
+ lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
+ } while (lmt_status == 0);
+}
+
static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
struct pending_queue *pend_q,
union cpt_inst_s inst;
uint64_t lmt_status;
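+	/*
+	 * In crypto adapter mode the completion is delivered to the
+	 * application as an event, so the request bypasses the
+	 * pending queue.
+	 */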
+ if (qp->ca_enable) {
+ otx2_ca_enqueue_req(qp, req, lmtline);
+ return 0;
+ }
+
if (unlikely(pend_q->pending_count >= OTX2_CPT_DEFAULT_CMD_QLEN))
return -EAGAIN;
INTERNAL {
global:
+ otx2_cryptodev_driver_id;
otx2_cpt_af_reg_read;
otx2_cpt_af_reg_write;
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_EVDEV_CRYPTO_ADPTR_DP_H_
+#define _OTX2_EVDEV_CRYPTO_ADPTR_DP_H_
+
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_eventdev.h>
+
+#include "cpt_pmd_logs.h"
+#include "cpt_ucode.h"
+
+#include "otx2_cryptodev.h"
+#include "otx2_cryptodev_hw_access.h"
+#include "otx2_cryptodev_ops_helper.h"
+#include "otx2_cryptodev_qp.h"
+
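+/*
+ * Finish a symmetric crypto op received via the event device: set the
+ * op status from the CPT completion code and release any sessionless
+ * session state back to the qp's session mempool.
+ */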
+static inline void
+otx2_ca_deq_post_process(const struct otx2_cpt_qp *qp,
+ struct rte_crypto_op *cop, uintptr_t *rsp,
+ uint8_t cc)
+{
+ if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ if (likely(cc == NO_ERR)) {
+ /* Verify authentication data if required */
+ if (unlikely(rsp[2]))
+ compl_auth_verify(cop, (uint8_t *)rsp[2],
+ rsp[3]);
+ else
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ if (cc == ERR_GC_ICV_MISCOMPARE)
+ cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ else
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+
+ if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ sym_session_clear(otx2_cryptodev_driver_id,
+ cop->sym->session);
+ memset(cop->sym->session, 0,
+ rte_cryptodev_sym_get_existing_header_session_size(
+ cop->sym->session));
+ rte_mempool_put(qp->sess_mp, cop->sym->session);
+ cop->sym->session = NULL;
+ }
+ }
+}
+
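+/*
+ * Called from the SSO dequeue path for RTE_EVENT_TYPE_CRYPTODEV events:
+ * converts the CPT request pointer carried in the event into the
+ * completed crypto op and frees the metabuf.
+ */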
+static inline uint64_t
+otx2_handle_crypto_event(uint64_t get_work1)
+{
+ struct cpt_request_info *req;
+ struct rte_crypto_op *cop;
+ uintptr_t *rsp;
+ void *metabuf;
+ uint8_t cc;
+
+ req = (struct cpt_request_info *)(get_work1);
+ cc = otx2_cpt_compcode_get(req);
+
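+	/* rsp[0] holds the metabuf, rsp[1] the crypto op */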
+ rsp = req->op;
+ metabuf = (void *)rsp[0];
+ cop = (void *)rsp[1];
+
+ otx2_ca_deq_post_process(req->qp, cop, rsp, cc);
+
+ rte_mempool_put(req->qp->meta_info.pool, metabuf);
+
+ return (uint64_t)(cop);
+}
+#endif /* _OTX2_EVDEV_CRYPTO_ADPTR_DP_H_ */
#include <otx2_common.h>
#include "otx2_evdev.h"
+#include "otx2_evdev_crypto_adptr_dp.h"
#include "otx2_ethdev_sec_tx.h"
/* SSO Operations */
ws->cur_tt = event.sched_type;
ws->cur_grp = event.queue_id;
- if (event.sched_type != SSO_TT_EMPTY &&
- event.event_type == RTE_EVENT_TYPE_ETHDEV) {
- otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
- (uint32_t) event.get_work0, flags, lookup_mem);
- /* Extracting tstamp, if PTP enabled*/
- tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)get_work1)
- + OTX2_SSO_WQE_SG_PTR);
- otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
- flags, (uint64_t *)tstamp_ptr);
- get_work1 = mbuf;
+ if (event.sched_type != SSO_TT_EMPTY) {
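+		/* Crypto adapter completions are handled only on
+		 * paths compiled with the security offload flag.
+		 */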
+ if ((flags & NIX_RX_OFFLOAD_SECURITY_F) &&
+ (event.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
+ get_work1 = otx2_handle_crypto_event(get_work1);
+ } else if (event.event_type == RTE_EVENT_TYPE_ETHDEV) {
+ otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
+ (uint32_t) event.get_work0, flags,
+ lookup_mem);
+			/* Extracting tstamp, if PTP enabled */
+ tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
+ get_work1) +
+ OTX2_SSO_WQE_SG_PTR);
+ otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
+ ws->tstamp, flags,
+ (uint64_t *)tstamp_ptr);
+ get_work1 = mbuf;
+ }
}
ev->event = event.get_work0;
#include <otx2_common.h>
#include "otx2_evdev.h"
+#include "otx2_evdev_crypto_adptr_dp.h"
/* SSO Operations */
static __rte_always_inline uint16_t
ws->cur_tt = event.sched_type;
ws->cur_grp = event.queue_id;
- if (event.sched_type != SSO_TT_EMPTY &&
- event.event_type == RTE_EVENT_TYPE_ETHDEV) {
- uint8_t port = event.sub_event_type;
+ if (event.sched_type != SSO_TT_EMPTY) {
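+		/* Crypto adapter completions are handled only on
+		 * paths compiled with the security offload flag.
+		 */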
+ if ((flags & NIX_RX_OFFLOAD_SECURITY_F) &&
+ (event.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
+ get_work1 = otx2_handle_crypto_event(get_work1);
+ } else if (event.event_type == RTE_EVENT_TYPE_ETHDEV) {
+ uint8_t port = event.sub_event_type;
- event.sub_event_type = 0;
- otx2_wqe_to_mbuf(get_work1, mbuf, port,
- event.flow_id, flags, lookup_mem);
- /* Extracting tstamp, if PTP enabled. CGX will prepend the
- * timestamp at starting of packet data and it can be derieved
- * from WQE 9 dword which corresponds to SG iova.
- * rte_pktmbuf_mtod_offset can be used for this purpose but it
- * brings down the performance as it reads mbuf->buf_addr which
- * is not part of cache in general fast path.
- */
- tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)get_work1)
- + OTX2_SSO_WQE_SG_PTR);
- otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp, flags,
- (uint64_t *)tstamp_ptr);
- get_work1 = mbuf;
+ event.sub_event_type = 0;
+ otx2_wqe_to_mbuf(get_work1, mbuf, port,
+ event.flow_id, flags, lookup_mem);
+			/* Extract the tstamp if PTP is enabled. CGX will
+			 * prepend the timestamp at the start of packet
+			 * data, and it can be derived from the WQE 9th
+			 * dword, which corresponds to the SG iova.
+			 * rte_pktmbuf_mtod_offset could be used for this
+			 * purpose, but it hurts performance as it reads
+			 * mbuf->buf_addr, which is generally not in cache
+			 * on the fast path.
+			 */
+ tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
+ get_work1) +
+ OTX2_SSO_WQE_SG_PTR);
+ otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
+ flags, (uint64_t *)tstamp_ptr);
+ get_work1 = mbuf;
+ }
}
ev->event = event.get_work0;