Add device control operations (configure, start, stop, close and info get) for the OCTEON TX2 crypto PMD.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
# PMD code
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO) += otx2_cryptodev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO) += otx2_cryptodev_hw_access.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO) += otx2_cryptodev_mbox.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO) += otx2_cryptodev_ops.c
name = 'octeontx2_crypto'
sources = files('otx2_cryptodev.c',
+ 'otx2_cryptodev_hw_access.c',
'otx2_cryptodev_mbox.c',
'otx2_cryptodev_ops.c')
/**< Base class */
uint16_t max_queues;
/**< Max queues supported */
+ uint8_t nb_queues;
+ /**< Number of crypto queues attached */
+ uint16_t lf_msixoff[OTX2_CPT_MAX_LFS];
+ /**< MSI-X offsets */
+ uint8_t err_intr_registered:1;
+ /**< Are error interrupts registered? */
};
#define CPT_LOGTYPE otx2_cpt_logtype
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2019 Marvell International Ltd.
+ */
+
+#include "otx2_common.h"
+#include "otx2_cryptodev.h"
+#include "otx2_cryptodev_hw_access.h"
+
+#include "cpt_pmd_logs.h"
+
+/*
+ * MISC error interrupt handler for a single CPT LF. The LF's BAR2
+ * slot base address is passed as the callback parameter.
+ */
+static void
+otx2_cpt_lf_err_intr_handler(void *param)
+{
+	uintptr_t base = (uintptr_t)param;
+	uint8_t lf_id;
+	uint64_t intr;
+
+	/* LF id is encoded in the BAR2 slot bits above the 4K register
+	 * window (see OTX2_CPT_LF_BAR2: q_id << 12).
+	 */
+	lf_id = (base >> 12) & 0xFF;
+
+	intr = otx2_read64(base + OTX2_CPT_LF_MISC_INT);
+	if (intr == 0)
+		return;
+
+	CPT_LOG_ERR("LF %d MISC_INT: 0x%" PRIx64 "", lf_id, intr);
+
+	/* Clear interrupt (write-1-to-clear) */
+	otx2_write64(intr, base + OTX2_CPT_LF_MISC_INT);
+}
+
+/*
+ * Disable the MISC error interrupt of one CPT LF and unregister its
+ * handler from the PCI device's interrupt handle.
+ */
+static void
+otx2_cpt_lf_err_intr_unregister(const struct rte_cryptodev *dev,
+				uint16_t msix_off, uintptr_t base)
+{
+	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+	struct rte_intr_handle *handle = &pci_dev->intr_handle;
+
+	/* Disable error interrupts (write-1-to-clear the enable bits) */
+	otx2_write64(~0ull, base + OTX2_CPT_LF_MISC_INT_ENA_W1C);
+
+	otx2_unregister_irq(handle, otx2_cpt_lf_err_intr_handler, (void *)base,
+			    msix_off);
+}
+
+/*
+ * Unregister the MISC error interrupt of every attached CPT LF and
+ * clear the err_intr_registered flag in the VF private data.
+ */
+void
+otx2_cpt_err_intr_unregister(const struct rte_cryptodev *dev)
+{
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+	uintptr_t base;
+	uint32_t i;
+
+	for (i = 0; i < vf->nb_queues; i++) {
+		base = OTX2_CPT_LF_BAR2(vf, i);
+		otx2_cpt_lf_err_intr_unregister(dev, vf->lf_msixoff[i], base);
+	}
+
+	vf->err_intr_registered = 0;
+}
+
+/*
+ * Register and enable the MISC error interrupt of one CPT LF.
+ *
+ * Returns 0 on success, negative value from otx2_register_irq() on
+ * failure (interrupts are left disabled in that case).
+ */
+static int
+otx2_cpt_lf_err_intr_register(const struct rte_cryptodev *dev,
+			      uint16_t msix_off, uintptr_t base)
+{
+	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+	struct rte_intr_handle *handle = &pci_dev->intr_handle;
+	int ret;
+
+	/* Disable error interrupts while the handler is being installed */
+	otx2_write64(~0ull, base + OTX2_CPT_LF_MISC_INT_ENA_W1C);
+
+	/* Register error interrupt handler */
+	ret = otx2_register_irq(handle, otx2_cpt_lf_err_intr_handler,
+				(void *)base, msix_off);
+	if (ret)
+		return ret;
+
+	/* Enable error interrupts (write-1-to-set the enable bits) */
+	otx2_write64(~0ull, base + OTX2_CPT_LF_MISC_INT_ENA_W1S);
+
+	return 0;
+}
+
+/*
+ * Register MISC error interrupts for all attached CPT LFs.
+ *
+ * All LFs must have a valid MSI-X offset (-EINVAL otherwise). A
+ * registration failure rolls back the LFs registered so far but is
+ * deliberately not reported as an error (see comment below).
+ */
+int
+otx2_cpt_err_intr_register(const struct rte_cryptodev *dev)
+{
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+	uint32_t i, j;
+	uintptr_t base;
+	int ret;
+
+	/* Validate all MSI-X offsets up front so no partial registration
+	 * is attempted with a known-bad offset.
+	 */
+	for (i = 0; i < vf->nb_queues; i++) {
+		if (vf->lf_msixoff[i] == MSIX_VECTOR_INVALID) {
+			CPT_LOG_ERR("Invalid CPT LF MSI-X offset: 0x%x",
+				    vf->lf_msixoff[i]);
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < vf->nb_queues; i++) {
+		base = OTX2_CPT_LF_BAR2(vf, i);
+		ret = otx2_cpt_lf_err_intr_register(dev, vf->lf_msixoff[i],
+						    base);
+		if (ret)
+			goto intr_unregister;
+	}
+
+	vf->err_intr_registered = 1;
+	return 0;
+
+intr_unregister:
+	/* Unregister the ones already registered */
+	for (j = 0; j < i; j++) {
+		base = OTX2_CPT_LF_BAR2(vf, j);
+		otx2_cpt_lf_err_intr_unregister(dev, vf->lf_msixoff[j], base);
+	}
+
+	/*
+	 * Failed to register error interrupt. Not returning error as this would
+	 * prevent application from enabling larger number of devs.
+	 *
+	 * This failure is a known issue because otx2_dev_init() initializes
+	 * interrupts based on static values from ATF, and the actual number
+	 * of interrupts needed (which is based on LFs) can be determined only
+	 * after otx2_dev_init() sets up interrupts which includes mbox
+	 * interrupts.
+	 */
+	return 0;
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_CRYPTODEV_HW_ACCESS_H_
+#define _OTX2_CRYPTODEV_HW_ACCESS_H_
+
+#include <rte_cryptodev.h>
+
+#include "otx2_dev.h"
+
+/* Register offsets */
+
+/* CPT LF registers */
+#define OTX2_CPT_LF_MISC_INT 0xb0ull
+#define OTX2_CPT_LF_MISC_INT_ENA_W1S 0xd0ull
+#define OTX2_CPT_LF_MISC_INT_ENA_W1C 0xe0ull
+
+#define OTX2_CPT_LF_BAR2(vf, q_id) \
+ ((vf)->otx2_dev.bar2 + \
+ ((RVU_BLOCK_ADDR_CPT0 << 20) | ((q_id) << 12)))
+
+void otx2_cpt_err_intr_unregister(const struct rte_cryptodev *dev);
+
+int otx2_cpt_err_intr_register(const struct rte_cryptodev *dev);
+
+#endif /* _OTX2_CRYPTODEV_HW_ACCESS_H_ */
*nb_queues = rsp->cpt;
return 0;
}
+
+/*
+ * Ask the AF (via mailbox) to attach nb_queues CPT LFs to this VF.
+ * One LF backs one crypto queue pair.
+ *
+ * Returns 0 on success, -ENOSPC if a mailbox message could not be
+ * allocated, -EIO on mailbox failure.
+ */
+int
+otx2_cpt_queues_attach(const struct rte_cryptodev *dev, uint8_t nb_queues)
+{
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+	struct rsrc_attach_req *req;
+
+	/* Ask AF to attach required LFs */
+
+	req = otx2_mbox_alloc_msg_attach_resources(mbox);
+	if (req == NULL)
+		return -ENOSPC;
+
+	/* 1 LF = 1 queue */
+	req->cptlfs = nb_queues;
+
+	if (otx2_mbox_process(mbox) < 0)
+		return -EIO;
+
+	/* Update number of attached queues */
+	vf->nb_queues = nb_queues;
+
+	return 0;
+}
+
+/*
+ * Ask the AF (via mailbox) to detach all CPT LFs from this VF, leaving
+ * other resource types attached (partial detach).
+ *
+ * Returns 0 on success, -ENOSPC if a mailbox message could not be
+ * allocated, -EIO on mailbox failure.
+ */
+int
+otx2_cpt_queues_detach(const struct rte_cryptodev *dev)
+{
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+	struct rsrc_detach_req *req;
+
+	req = otx2_mbox_alloc_msg_detach_resources(mbox);
+	if (req == NULL)
+		return -ENOSPC;
+
+	/* Detach only CPT LFs; keep other attached resources intact */
+	req->cptlfs = true;
+	req->partial = true;
+	if (otx2_mbox_process(mbox) < 0)
+		return -EIO;
+
+	/* Queues have been detached */
+	vf->nb_queues = 0;
+
+	return 0;
+}
+
+/*
+ * Query the AF for the MSI-X vector offset of each attached CPT LF and
+ * cache the offsets in the VF private data (vf->lf_msixoff[]).
+ *
+ * Returns 0 on success, negative mailbox error otherwise.
+ */
+int
+otx2_cpt_msix_offsets_get(const struct rte_cryptodev *dev)
+{
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+	struct msix_offset_rsp *rsp;
+	uint32_t i;
+	int ret;
+
+	/* Get CPT MSI-X vector offsets */
+
+	otx2_mbox_alloc_msg_msix_offset(mbox);
+
+	ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < vf->nb_queues; i++)
+		vf->lf_msixoff[i] = rsp->cptlf_msixoff[i];
+
+	return 0;
+}
int otx2_cpt_available_queues_get(const struct rte_cryptodev *dev,
uint16_t *nb_queues);
+int otx2_cpt_queues_attach(const struct rte_cryptodev *dev, uint8_t nb_queues);
+
+int otx2_cpt_queues_detach(const struct rte_cryptodev *dev);
+
+int otx2_cpt_msix_offsets_get(const struct rte_cryptodev *dev);
+
#endif /* _OTX2_CRYPTODEV_MBOX_H_ */
#include <rte_cryptodev_pmd.h>
+#include "otx2_cryptodev.h"
+#include "otx2_cryptodev_hw_access.h"
+#include "otx2_cryptodev_mbox.h"
#include "otx2_cryptodev_ops.h"
+#include "otx2_mbox.h"
+
+#include "cpt_hw_types.h"
+#include "cpt_pmd_logs.h"
+
+/* PMD ops */
+
+/*
+ * dev_configure callback: validate the requested queue-pair count,
+ * re-attach that many CPT LFs and set up their error interrupts.
+ *
+ * Any previously attached queues/interrupts are torn down first, so
+ * the function is safe to call on reconfiguration. On failure after
+ * attach, the newly attached queues are detached again.
+ */
+static int
+otx2_cpt_dev_config(struct rte_cryptodev *dev,
+		    struct rte_cryptodev_config *conf)
+{
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+	int ret;
+
+	if (conf->nb_queue_pairs > vf->max_queues) {
+		CPT_LOG_ERR("Invalid number of queue pairs requested");
+		return -EINVAL;
+	}
+
+	/* Drop the features the application asked to disable */
+	dev->feature_flags &= ~conf->ff_disable;
+
+	/* Unregister error interrupts */
+	if (vf->err_intr_registered)
+		otx2_cpt_err_intr_unregister(dev);
+
+	/* Detach queues */
+	if (vf->nb_queues) {
+		ret = otx2_cpt_queues_detach(dev);
+		if (ret) {
+			CPT_LOG_ERR("Could not detach CPT queues");
+			return ret;
+		}
+	}
+
+	/* Attach queues */
+	ret = otx2_cpt_queues_attach(dev, conf->nb_queue_pairs);
+	if (ret) {
+		CPT_LOG_ERR("Could not attach CPT queues");
+		return -ENODEV;
+	}
+
+	/* MSI-X offsets must be fetched before interrupts can be set up */
+	ret = otx2_cpt_msix_offsets_get(dev);
+	if (ret) {
+		CPT_LOG_ERR("Could not get MSI-X offsets");
+		goto queues_detach;
+	}
+
+	/* Register error interrupts */
+	ret = otx2_cpt_err_intr_register(dev);
+	if (ret) {
+		CPT_LOG_ERR("Could not register error interrupts");
+		goto queues_detach;
+	}
+
+	/* Ensure all the above device state is visible before returning */
+	rte_mb();
+	return 0;
+
+queues_detach:
+	otx2_cpt_queues_detach(dev);
+	return ret;
+}
+
+/* dev_start callback: nothing to do for this PMD beyond tracing. */
+static int
+otx2_cpt_dev_start(struct rte_cryptodev *dev)
+{
+	CPT_PMD_INIT_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+
+	return 0;
+}
+
+/* dev_stop callback: nothing to do for this PMD beyond tracing. */
+static void
+otx2_cpt_dev_stop(struct rte_cryptodev *dev)
+{
+	CPT_PMD_INIT_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+}
+
+/*
+ * dev_close callback: tear down error interrupts (if registered) and
+ * detach all attached CPT queues.
+ */
+static int
+otx2_cpt_dev_close(struct rte_cryptodev *dev)
+{
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+	int ret = 0;
+
+	if (vf->err_intr_registered)
+		otx2_cpt_err_intr_unregister(dev);
+
+	if (vf->nb_queues == 0)
+		return ret;
+
+	ret = otx2_cpt_queues_detach(dev);
+	if (ret)
+		CPT_LOG_ERR("Could not detach CPT queues");
+
+	return ret;
+}
+
+/*
+ * dev_infos_get callback: report device capabilities and limits.
+ * Capability table and session ops are not wired up yet, so those
+ * fields are left NULL/zero.
+ */
+static void
+otx2_cpt_dev_info_get(struct rte_cryptodev *dev,
+		      struct rte_cryptodev_info *info)
+{
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+
+	if (info == NULL)
+		return;
+
+	info->max_nb_queue_pairs = vf->max_queues;
+	info->feature_flags = dev->feature_flags;
+	info->capabilities = NULL;
+	info->sym.max_nb_sessions = 0;
+	info->driver_id = otx2_cryptodev_driver_id;
+	info->min_mbuf_headroom_req = OTX2_CPT_MIN_HEADROOM_REQ;
+	info->min_mbuf_tailroom_req = OTX2_CPT_MIN_TAILROOM_REQ;
+}
struct rte_cryptodev_ops otx2_cpt_ops = {
/* Device control ops */
- .dev_configure = NULL,
- .dev_start = NULL,
- .dev_stop = NULL,
- .dev_close = NULL,
- .dev_infos_get = NULL,
+ .dev_configure = otx2_cpt_dev_config,
+ .dev_start = otx2_cpt_dev_start,
+ .dev_stop = otx2_cpt_dev_stop,
+ .dev_close = otx2_cpt_dev_close,
+ .dev_infos_get = otx2_cpt_dev_info_get,
.stats_get = NULL,
.stats_reset = NULL,
#include <rte_cryptodev_pmd.h>
+#define OTX2_CPT_MIN_HEADROOM_REQ 24
+#define OTX2_CPT_MIN_TAILROOM_REQ 8
+
struct rte_cryptodev_ops otx2_cpt_ops;
#endif /* _OTX2_CRYPTODEV_OPS_H_ */