#include <rte_cryptodev_pmd.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
+#include <rte_event_crypto_adapter.h>
#include "otx2_cryptodev.h"
#include "otx2_cryptodev_capabilities.h"
return -ENOTSUP;
}
+/*
+ * Enqueue one CPT request on the event-crypto-adapter path.
+ * The response event attributes (flow id, sched type, target event queue)
+ * now come from the per-op rte_event_crypto_metadata instead of the fixed
+ * per-QP event config, which is what enables OP_FWD mode.
+ * Returns 0 on success, -EINVAL when no usable metadata is attached.
+ */
-static __rte_always_inline void __rte_hot
+static __rte_always_inline int32_t __rte_hot
otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
struct cpt_request_info *req,
void *lmtline,
+ struct rte_crypto_op *op,
uint64_t cpt_inst_w7)
{
+ union rte_event_crypto_metadata *m_data;
union cpt_inst_s inst;
uint64_t lmt_status;
+ /* With a session, metadata was stored as session user data. */
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ m_data = rte_cryptodev_sym_session_get_user_data(
+ op->sym->session);
+ if (m_data == NULL) {
+ /* No metadata: drop mbuf and op, report EINVAL. */
+ rte_pktmbuf_free(op->sym->m_src);
+ rte_crypto_op_free(op);
+ rte_errno = EINVAL;
+ return -EINVAL;
+ }
+ } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
+ op->private_data_offset) {
+ /* Sessionless: metadata lives in the op's private data area. */
+ m_data = (union rte_event_crypto_metadata *)
+ ((uint8_t *)op +
+ op->private_data_offset);
+ } else {
+ /*
+ * NOTE(review): unlike the with-session failure above, this
+ * path returns without freeing the op or setting rte_errno;
+ * confirm callers expect that asymmetry.
+ */
+ return -EINVAL;
+ }
+
inst.u[0] = 0;
inst.s9x.res_addr = req->comp_baddr;
inst.u[2] = 0;
inst.s9x.ei2 = req->ist.ei2;
inst.s9x.ei3 = cpt_inst_w7;
- inst.s9x.qord = 1;
- inst.s9x.grp = qp->ev.queue_id;
- inst.s9x.tt = qp->ev.sched_type;
- inst.s9x.tag = (RTE_EVENT_TYPE_CRYPTODEV << 28) |
- qp->ev.flow_id;
- inst.s9x.wq_ptr = (uint64_t)req >> 3;
+ /*
+ * Word 2: SSO tag = (CRYPTODEV event type << 28) | flow id, with
+ * sched type at bit 32 and target event queue (grp) at bit 34 —
+ * replaces the removed per-QP s9x field assignments above.
+ */
+ inst.u[2] = (((RTE_EVENT_TYPE_CRYPTODEV << 28) |
+ m_data->response_info.flow_id) |
+ ((uint64_t)m_data->response_info.sched_type << 32) |
+ ((uint64_t)m_data->response_info.queue_id << 34));
+ /*
+ * Word 3: presumably qord bit plus the request pointer as wq_ptr
+ * (req >> 3 re-shifted into place) — TODO confirm against the
+ * CPT_INST_S word layout.
+ */
+ inst.u[3] = 1 | (((uint64_t)req >> 3) << 3);
req->qp = qp;
do {
+ /* Retry LMTST until the submission is accepted by hardware. */
lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
} while (lmt_status == 0);
+ return 0;
}
static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
struct pending_queue *pend_q,
struct cpt_request_info *req,
+ struct rte_crypto_op *op,
uint64_t cpt_inst_w7)
{
void *lmtline = qp->lmtline;
union cpt_inst_s inst;
uint64_t lmt_status;
- if (qp->ca_enable) {
- otx2_ca_enqueue_req(qp, req, lmtline, cpt_inst_w7);
- return 0;
- }
+ if (qp->ca_enable)
+ return otx2_ca_enqueue_req(qp, req, lmtline, op, cpt_inst_w7);
if (unlikely(pend_q->pending_count >= OTX2_CPT_DEFAULT_CMD_QLEN))
return -EAGAIN;
goto req_fail;
}
- ret = otx2_cpt_enqueue_req(qp, pend_q, params.req, sess->cpt_inst_w7);
+ ret = otx2_cpt_enqueue_req(qp, pend_q, params.req, op,
+ sess->cpt_inst_w7);
if (unlikely(ret)) {
CPT_LOG_DP_ERR("Could not enqueue crypto req");
return ret;
}
- ret = otx2_cpt_enqueue_req(qp, pend_q, req, sess->cpt_inst_w7);
+ ret = otx2_cpt_enqueue_req(qp, pend_q, req, op, sess->cpt_inst_w7);
if (unlikely(ret)) {
/* Free buffer allocated by fill params routines */
return ret;
}
- ret = otx2_cpt_enqueue_req(qp, pend_q, req, sess->cpt_inst_w7);
+ ret = otx2_cpt_enqueue_req(qp, pend_q, req, op, sess->cpt_inst_w7);
if (winsz && esn) {
seq_in_sa = ((uint64_t)esn_hi << 32) | esn_low;
#include <rte_mbuf_pool_ops.h>
#include <rte_pci.h>
-#include "otx2_evdev_stats.h"
#include "otx2_evdev.h"
+#include "otx2_evdev_crypto_adptr_tx.h"
+#include "otx2_evdev_stats.h"
#include "otx2_irq.h"
#include "otx2_tim_evdev.h"
[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
}
+ event_dev->ca_enqueue = otx2_ssogws_ca_enq;
if (dev->dual_ws) {
event_dev->enqueue = otx2_ssogws_dual_enq;
[!!(dev->tx_offloads &
NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
}
+ event_dev->ca_enqueue = otx2_ssogws_dual_ca_enq;
}
event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
RTE_SET_USED(cdev);
*caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND |
- RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW;
+ RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW |
+ RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD;
return 0;
}
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (C) 2020 Marvell International Ltd.
- */
-
-#ifndef _OTX2_EVDEV_CRYPTO_ADPTR_DP_H_
-#define _OTX2_EVDEV_CRYPTO_ADPTR_DP_H_
-
-#include <rte_cryptodev.h>
-#include <rte_cryptodev_pmd.h>
-#include <rte_eventdev.h>
-
-#include "cpt_pmd_logs.h"
-#include "cpt_ucode.h"
-
-#include "otx2_cryptodev.h"
-#include "otx2_cryptodev_hw_access.h"
-#include "otx2_cryptodev_ops_helper.h"
-#include "otx2_cryptodev_qp.h"
-
-static inline void
-otx2_ca_deq_post_process(const struct otx2_cpt_qp *qp,
- struct rte_crypto_op *cop, uintptr_t *rsp,
- uint8_t cc)
-{
- if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
- if (likely(cc == NO_ERR)) {
- /* Verify authentication data if required */
- if (unlikely(rsp[2]))
- compl_auth_verify(cop, (uint8_t *)rsp[2],
- rsp[3]);
- else
- cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- } else {
- if (cc == ERR_GC_ICV_MISCOMPARE)
- cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- else
- cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
- }
-
- if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
- sym_session_clear(otx2_cryptodev_driver_id,
- cop->sym->session);
- memset(cop->sym->session, 0,
- rte_cryptodev_sym_get_existing_header_session_size(
- cop->sym->session));
- rte_mempool_put(qp->sess_mp, cop->sym->session);
- cop->sym->session = NULL;
- }
- }
-
-}
-
-static inline uint64_t
-otx2_handle_crypto_event(uint64_t get_work1)
-{
- struct cpt_request_info *req;
- struct rte_crypto_op *cop;
- uintptr_t *rsp;
- void *metabuf;
- uint8_t cc;
-
- req = (struct cpt_request_info *)(get_work1);
- cc = otx2_cpt_compcode_get(req);
-
- rsp = req->op;
- metabuf = (void *)rsp[0];
- cop = (void *)rsp[1];
-
- otx2_ca_deq_post_process(req->qp, cop, rsp, cc);
-
- rte_mempool_put(req->qp->meta_info.pool, metabuf);
-
- return (uint64_t)(cop);
-}
-#endif /* _OTX2_EVDEV_CRYPTO_ADPTR_DP_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_EVDEV_CRYPTO_ADPTR_RX_H_
+#define _OTX2_EVDEV_CRYPTO_ADPTR_RX_H_
+
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_eventdev.h>
+
+#include "cpt_pmd_logs.h"
+#include "cpt_ucode.h"
+
+#include "otx2_cryptodev.h"
+#include "otx2_cryptodev_hw_access.h"
+#include "otx2_cryptodev_ops_helper.h"
+#include "otx2_cryptodev_qp.h"
+
+/*
+ * Translate a CPT completion into the op's final status and, for
+ * sessionless symmetric ops, clear and return the temporary session
+ * to the queue pair's session mempool.
+ */
+static inline void
+otx2_ca_deq_post_process(const struct otx2_cpt_qp *qp,
+ struct rte_crypto_op *cop, uintptr_t *rsp,
+ uint8_t cc)
+{
+ if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ if (likely(cc == NO_ERR)) {
+ /* Verify authentication data if required */
+ if (unlikely(rsp[2]))
+ compl_auth_verify(cop, (uint8_t *)rsp[2],
+ rsp[3]);
+ else
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ /* ICV miscompare maps to AUTH_FAILED; all else ERROR. */
+ if (cc == ERR_GC_ICV_MISCOMPARE)
+ cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ else
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+
+ if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ /* Scrub the session header before returning it to the pool. */
+ sym_session_clear(otx2_cryptodev_driver_id,
+ cop->sym->session);
+ memset(cop->sym->session, 0,
+ rte_cryptodev_sym_get_existing_header_session_size(
+ cop->sym->session));
+ rte_mempool_put(qp->sess_mp, cop->sym->session);
+ cop->sym->session = NULL;
+ }
+ }
+
+}
+
+/*
+ * Convert a crypto completion received from the SSO (get_work word 1 is
+ * the cpt_request_info pointer) into the rte_crypto_op to deliver in the
+ * event. rsp layout appears to be [0] = metabuf, [1] = crypto op —
+ * TODO confirm against the enqueue fill-params routine. The metabuf is
+ * returned to the QP's meta pool here.
+ */
+static inline uint64_t
+otx2_handle_crypto_event(uint64_t get_work1)
+{
+ struct cpt_request_info *req;
+ struct rte_crypto_op *cop;
+ uintptr_t *rsp;
+ void *metabuf;
+ uint8_t cc;
+
+ req = (struct cpt_request_info *)(get_work1);
+ cc = otx2_cpt_compcode_get(req);
+
+ rsp = req->op;
+ metabuf = (void *)rsp[0];
+ cop = (void *)rsp[1];
+
+ otx2_ca_deq_post_process(req->qp, cop, rsp, cc);
+
+ rte_mempool_put(req->qp->meta_info.pool, metabuf);
+
+ return (uint64_t)(cop);
+}
+#endif /* _OTX2_EVDEV_CRYPTO_ADPTR_RX_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2021 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_EVDEV_CRYPTO_ADPTR_TX_H_
+#define _OTX2_EVDEV_CRYPTO_ADPTR_TX_H_
+
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_event_crypto_adapter.h>
+#include <rte_eventdev.h>
+
+#include <otx2_cryptodev_qp.h>
+#include <otx2_worker.h>
+
+/*
+ * Forward one event's crypto op to its target cryptodev queue pair
+ * (event-crypto-adapter OP_FWD enqueue). The target cdev/qp ids come
+ * from the adapter request metadata attached to the op. Returns the
+ * count enqueued (0 on drop/failure, with rte_errno = EINVAL).
+ */
+static inline uint16_t
+otx2_ca_enq(uintptr_t tag_op, const struct rte_event *ev)
+{
+ union rte_event_crypto_metadata *m_data;
+ struct rte_crypto_op *crypto_op;
+ struct rte_cryptodev *cdev;
+ struct otx2_cpt_qp *qp;
+ uint8_t cdev_id;
+ uint16_t qp_id;
+
+ crypto_op = ev->event_ptr;
+ if (crypto_op == NULL)
+ return 0;
+
+ if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ /* With-session: metadata stored as session user data. */
+ m_data = rte_cryptodev_sym_session_get_user_data(
+ crypto_op->sym->session);
+ if (m_data == NULL)
+ goto free_op;
+
+ cdev_id = m_data->request_info.cdev_id;
+ qp_id = m_data->request_info.queue_pair_id;
+ } else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
+ crypto_op->private_data_offset) {
+ /* Sessionless: metadata lives in the op private data area. */
+ m_data = (union rte_event_crypto_metadata *)
+ ((uint8_t *)crypto_op +
+ crypto_op->private_data_offset);
+ cdev_id = m_data->request_info.cdev_id;
+ qp_id = m_data->request_info.queue_pair_id;
+ } else {
+ goto free_op;
+ }
+
+ cdev = &rte_cryptodevs[cdev_id];
+ qp = cdev->data->queue_pairs[qp_id];
+
+ /*
+ * NOTE(review): sched_type == 0 triggers a head-of-order wait on
+ * this GWS before submitting — confirm which RTE_SCHED_TYPE_*
+ * value 0 corresponds to here.
+ */
+ if (!ev->sched_type)
+ otx2_ssogws_head_wait(tag_op);
+ if (qp->ca_enable)
+ /*
+ * NOTE(review): a 0 return from enqueue_burst is passed
+ * through without freeing the op — confirm ownership stays
+ * with the caller in that case.
+ */
+ return cdev->enqueue_burst(qp, &crypto_op, 1);
+
+free_op:
+ /* Bad/missing metadata or adapter disabled on QP: drop the op. */
+ rte_pktmbuf_free(crypto_op->sym->m_src);
+ rte_crypto_op_free(crypto_op);
+ rte_errno = EINVAL;
+ return 0;
+}
+
+/*
+ * ca_enqueue hook for the single (non-dual) SSO workslot.
+ * Only one event is forwarded per call; nb_events is ignored.
+ */
+static uint16_t __rte_hot
+otx2_ssogws_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
+{
+ struct otx2_ssogws *ws = port;
+
+ RTE_SET_USED(nb_events);
+
+ return otx2_ca_enq(ws->tag_op, ev);
+}
+
+/*
+ * ca_enqueue hook for the dual SSO workslot: uses the tag_op of the
+ * currently inactive half (!ws->vws) — presumably the slot that holds
+ * the in-flight event context; TODO confirm. nb_events is ignored.
+ */
+static uint16_t __rte_hot
+otx2_ssogws_dual_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
+{
+ struct otx2_ssogws_dual *ws = port;
+
+ RTE_SET_USED(nb_events);
+
+ return otx2_ca_enq(ws->ws_state[!ws->vws].tag_op, ev);
+}
+#endif /* _OTX2_EVDEV_CRYPTO_ADPTR_TX_H_ */
#include <otx2_common.h>
#include "otx2_evdev.h"
-#include "otx2_evdev_crypto_adptr_dp.h"
+#include "otx2_evdev_crypto_adptr_rx.h"
#include "otx2_ethdev_sec_tx.h"
/* SSO Operations */
#include <otx2_common.h>
#include "otx2_evdev.h"
-#include "otx2_evdev_crypto_adptr_dp.h"
+#include "otx2_evdev_crypto_adptr_rx.h"
/* SSO Operations */
static __rte_always_inline uint16_t