deps += ['common_cpt']
deps += ['common_octeontx2']
deps += ['ethdev']
+deps += ['eventdev']
deps += ['security']
name = 'octeontx2_crypto'
#define OTX2_CPT_LF_NQ(a) (0x400ull | (uint64_t)(a) << 3)
#define OTX2_CPT_AF_LF_CTL(a) (0x27000ull | (uint64_t)(a) << 3)
+#define OTX2_CPT_AF_LF_CTL2(a) (0x29000ull | (uint64_t)(a) << 3)
#define OTX2_CPT_LF_BAR2(vf, q_id) \
((vf)->otx2_dev.bar2 + \
} s;
};
+/* Layout of the CPT_AF_LF(x)_CTL2 register (offset OTX2_CPT_AF_LF_CTL2).
+ * The crypto adapter programs sso_pf_func here to bind a CPT LF to the SSO.
+ */
+union otx2_cpt_af_lf_ctl2 {
+ uint64_t u; /* Raw 64-bit register value */
+ struct {
+ uint64_t exe_no_swap : 1; /* NOTE(review): presumably disables engine data byte-swap — confirm with HW manual */
+ uint64_t exe_ldwb : 1; /* NOTE(review): presumably load-with-writeback hint for engine fetches — confirm with HW manual */
+ uint64_t reserved_2_31 : 30;
+ uint64_t sso_pf_func : 16; /* SSO PF_FUNC events are sent to; written by the crypto adapter (see otx2_ca_qp_add) */
+ uint64_t nix_pf_func : 16; /* NIX PF_FUNC; not touched by the adapter code in this file */
+ } s;
+};
+
union otx2_cpt_lf_q_grp_ptr {
uint64_t u;
struct {
int otx2_cpt_msix_offsets_get(const struct rte_cryptodev *dev);
+__rte_internal
int otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
uint64_t *val);
+__rte_internal
int otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
uint64_t val);
#define _OTX2_CRYPTODEV_QP_H_
#include <rte_common.h>
+#include <rte_eventdev.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
/**< Metabuf info required to support operations on the queue pair */
rte_iova_t iq_dma_addr;
/**< Instruction queue address */
+ struct rte_event ev;
+ /**< Event information required for binding cryptodev queue to
+ * eventdev queue. Used by crypto adapter.
+ */
+ uint8_t ca_enable;
+ /**< Set when queue pair is added to crypto adapter */
};
#endif /* _OTX2_CRYPTODEV_QP_H_ */
DPDK_21 {
local: *;
};
+
+INTERNAL {
+ global:
+
+ otx2_cpt_af_reg_read;
+ otx2_cpt_af_reg_write;
+
+ local: *;
+};
'otx2_worker_dual.c',
'otx2_evdev.c',
'otx2_evdev_adptr.c',
+ 'otx2_evdev_crypto_adptr.c',
'otx2_evdev_irq.c',
'otx2_evdev_selftest.c',
'otx2_tim_evdev.c',
endif
endforeach
-deps += ['bus_pci', 'common_octeontx2', 'mempool_octeontx2', 'pmd_octeontx2']
+deps += ['bus_pci', 'common_octeontx2', 'mempool_octeontx2', 'pmd_octeontx2', 'pmd_octeontx2_crypto']
includes += include_directories('../../crypto/octeontx2')
+includes += include_directories('../../common/cpt')
.timer_adapter_caps_get = otx2_tim_caps_get,
+ .crypto_adapter_caps_get = otx2_ca_caps_get,
+ .crypto_adapter_queue_pair_add = otx2_ca_qp_add,
+ .crypto_adapter_queue_pair_del = otx2_ca_qp_del,
+
.xstats_get = otx2_sso_xstats_get,
.xstats_reset = otx2_sso_xstats_reset,
.xstats_get_names = otx2_sso_xstats_get_names,
const struct rte_eth_dev *eth_dev,
int32_t tx_queue_id);
+/* Event crypto adapter API's */
+int otx2_ca_caps_get(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev, uint32_t *caps);
+
+int otx2_ca_qp_add(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev, int32_t queue_pair_id,
+ const struct rte_event *event);
+
+int otx2_ca_qp_del(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev, int32_t queue_pair_id);
+
/* Clean up API's */
typedef void (*otx2_handle_event_t)(void *arg, struct rte_event ev);
void ssogws_flush_events(struct otx2_ssogws *ws, uint8_t queue_id,
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_eventdev.h>
+
+#include "otx2_cryptodev_hw_access.h"
+#include "otx2_cryptodev_qp.h"
+#include "otx2_cryptodev_mbox.h"
+#include "otx2_evdev.h"
+
+int
+otx2_ca_caps_get(const struct rte_eventdev *dev,
+		 const struct rte_cryptodev *cdev, uint32_t *caps)
+{
+	/* Capabilities are fixed for this PMD: the hardware binds a crypto
+	 * queue pair to an event queue internally and enqueues completions
+	 * as NEW events without software forwarding.
+	 */
+	const uint32_t adptr_caps =
+		RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND |
+		RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW;
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(cdev);
+
+	*caps = adptr_caps;
+
+	return 0;
+}
+
+/* Bind a cryptodev queue pair to the event device for the crypto adapter.
+ * Programs the CPT LF's CTL2 register with the SSO PF_FUNC so completions
+ * are delivered as events, then records the event template in the QP.
+ * Returns 0 on success, negative mailbox error otherwise.
+ */
+int
+otx2_ca_qp_add(const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
+	       int32_t queue_pair_id, const struct rte_event *event)
+{
+	struct otx2_sso_evdev *sso_evdev = sso_pmd_priv(dev);
+	union otx2_cpt_af_lf_ctl2 af_lf_ctl2;
+	struct otx2_cpt_qp *qp;
+	int ret;
+
+	qp = cdev->data->queue_pairs[queue_pair_id];
+
+	/* Bind the CPT LF to the SSO in hardware first; commit the software
+	 * adapter state only after the register update succeeds, so a failed
+	 * add does not leave the queue pair half-enabled.
+	 */
+	ret = otx2_cpt_af_reg_read(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
+				   &af_lf_ctl2.u);
+	if (ret)
+		return ret;
+
+	af_lf_ctl2.s.sso_pf_func = otx2_sso_pf_func_get();
+	ret = otx2_cpt_af_reg_write(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
+				    af_lf_ctl2.u);
+	if (ret)
+		return ret;
+
+	qp->ca_enable = 1;
+	rte_memcpy(&qp->ev, event, sizeof(struct rte_event));
+
+	/* Completion events carry security metadata; enable the security
+	 * Rx offload and refresh the fastpath function pointers.
+	 */
+	sso_evdev->rx_offloads |= NIX_RX_OFFLOAD_SECURITY_F;
+	sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)dev);
+
+	return 0;
+}
+
+/* Unbind a cryptodev queue pair from the event device.
+ * Clears the SSO PF_FUNC in the CPT LF's CTL2 register and resets the
+ * QP's adapter state. Returns 0 on success, negative mailbox error
+ * otherwise.
+ */
+int
+otx2_ca_qp_del(const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
+	       int32_t queue_pair_id)
+{
+	union otx2_cpt_af_lf_ctl2 af_lf_ctl2;
+	struct otx2_cpt_qp *qp;
+	int ret;
+
+	RTE_SET_USED(dev);
+
+	qp = cdev->data->queue_pairs[queue_pair_id];
+
+	/* Unbind in hardware first; clear the software adapter state only
+	 * after the register update succeeds, so a failed delete does not
+	 * report the QP as removed while events are still being generated.
+	 */
+	ret = otx2_cpt_af_reg_read(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
+				   &af_lf_ctl2.u);
+	if (ret)
+		return ret;
+
+	af_lf_ctl2.s.sso_pf_func = 0;
+	ret = otx2_cpt_af_reg_write(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
+				    af_lf_ctl2.u);
+	if (ret)
+		return ret;
+
+	qp->ca_enable = 0;
+	memset(&qp->ev, 0, sizeof(struct rte_event));
+
+	return 0;
+}