1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) 2019 Marvell International Ltd.
4 #include <rte_cryptodev.h>
6 #include "otx2_common.h"
7 #include "otx2_cryptodev.h"
8 #include "otx2_cryptodev_hw_access.h"
9 #include "otx2_cryptodev_mbox.h"
10 #include "otx2_cryptodev_ops.h"
13 #include "cpt_pmd_logs.h"
/*
 * IRQ handler for a CPT LF (logical function) MISC error interrupt.
 *
 * param carries the LF's BAR2 base address smuggled through the void
 * pointer (see the (void *)base casts at the register/unregister sites).
 *
 * NOTE(review): this chunk is elided — the return type, braces and the
 * declarations of lf_id/intr are not visible here.
 */
16 otx2_cpt_lf_err_intr_handler(void *param)
18 uintptr_t base = (uintptr_t)param;
/*
 * Recover the LF id from its BAR2 address: the >>12 implies each LF
 * occupies a 4 KB register window — TODO confirm against OTX2_CPT_LF_BAR2.
 */
22 lf_id = (base >> 12) & 0xFF;
/* Read the pending MISC interrupt cause bits. */
24 intr = otx2_read64(base + OTX2_CPT_LF_MISC_INT);
28 CPT_LOG_ERR("LF %d MISC_INT: 0x%" PRIx64 "", lf_id, intr);
/* Acknowledge: write the cause bits back to clear them (W1C semantics
 * presumed for MISC_INT — verify against the HRM). */
31 otx2_write64(intr, base + OTX2_CPT_LF_MISC_INT);
/*
 * Mask and unregister the MISC error interrupt of a single CPT LF.
 *
 * @dev       cryptodev owning the LF (used to reach the PCI intr handle)
 * @msix_off  MSI-X vector offset the handler was registered on
 * @base      LF BAR2 base address; doubles as the handler's cookie
 *
 * NOTE(review): elided chunk — return type, braces and the trailing
 * argument(s) of otx2_unregister_irq() are not visible here.
 */
35 otx2_cpt_lf_err_intr_unregister(const struct rte_cryptodev *dev,
36 uint16_t msix_off, uintptr_t base)
38 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
39 struct rte_intr_handle *handle = &pci_dev->intr_handle;
41 /* Disable error interrupts */
42 otx2_write64(~0ull, base + OTX2_CPT_LF_MISC_INT_ENA_W1C);
/* Detach the handler; base is the same cookie passed at registration. */
44 otx2_unregister_irq(handle, otx2_cpt_lf_err_intr_handler, (void *)base,
/*
 * Tear down the MISC error interrupt of every LF owned by this device,
 * then mark the device as having no error interrupts registered.
 *
 * NOTE(review): elided chunk — return type, braces and the declarations
 * of i/base are not visible here.
 */
49 otx2_cpt_err_intr_unregister(const struct rte_cryptodev *dev)
51 struct otx2_cpt_vf *vf = dev->data->dev_private;
/* One MISC error interrupt was registered per queue/LF. */
55 for (i = 0; i < vf->nb_queues; i++) {
56 base = OTX2_CPT_LF_BAR2(vf, i);
57 otx2_cpt_lf_err_intr_unregister(dev, vf->lf_msixoff[i], base);
60 vf->err_intr_registered = 0;
/*
 * Register and enable the MISC error interrupt of a single CPT LF.
 *
 * Sequence: mask all MISC causes first so no interrupt fires while the
 * handler is being attached, register the handler on msix_off, then
 * unmask via the W1S register.
 *
 * @dev       cryptodev owning the LF
 * @msix_off  MSI-X vector offset to attach the handler to
 * @base      LF BAR2 base address; passed to the handler as its cookie
 *
 * NOTE(review): elided chunk — return type, braces, the declaration of
 * ret and the error check between registration and enable are not
 * visible here.
 */
64 otx2_cpt_lf_err_intr_register(const struct rte_cryptodev *dev,
65 uint16_t msix_off, uintptr_t base)
67 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
68 struct rte_intr_handle *handle = &pci_dev->intr_handle;
71 /* Disable error interrupts */
72 otx2_write64(~0ull, base + OTX2_CPT_LF_MISC_INT_ENA_W1C);
74 /* Register error interrupt handler */
75 ret = otx2_register_irq(handle, otx2_cpt_lf_err_intr_handler,
76 (void *)base, msix_off);
80 /* Enable error interrupts */
81 otx2_write64(~0ull, base + OTX2_CPT_LF_MISC_INT_ENA_W1S);
/*
 * Register MISC error interrupts for all LFs of this device.
 *
 * Two passes: first validate that every LF has a usable MSI-X offset
 * (bail out if any is MSIX_VECTOR_INVALID), then register each LF's
 * interrupt. On a partial failure the already-registered LFs are rolled
 * back under the intr_unregister label.
 *
 * NOTE(review): elided chunk — return type, braces, declarations of
 * i/j/ret/base, the error-path returns and the end of the trailing
 * rationale comment are not visible here.
 */
87 otx2_cpt_err_intr_register(const struct rte_cryptodev *dev)
89 struct otx2_cpt_vf *vf = dev->data->dev_private;
/* Pass 1: refuse to start if any LF lacks a valid MSI-X vector. */
93 for (i = 0; i < vf->nb_queues; i++) {
94 if (vf->lf_msixoff[i] == MSIX_VECTOR_INVALID) {
95 CPT_LOG_ERR("Invalid CPT LF MSI-X offset: 0x%x",
/* Pass 2: register per-LF error interrupts; roll back on failure. */
101 for (i = 0; i < vf->nb_queues; i++) {
102 base = OTX2_CPT_LF_BAR2(vf, i);
103 ret = otx2_cpt_lf_err_intr_register(dev, vf->lf_msixoff[i],
106 goto intr_unregister;
109 vf->err_intr_registered = 1;
113 /* Unregister the ones already registered */
/* j < i: only LFs registered before the failing one are rolled back. */
114 for (j = 0; j < i; j++) {
115 base = OTX2_CPT_LF_BAR2(vf, j);
116 otx2_cpt_lf_err_intr_unregister(dev, vf->lf_msixoff[j], base);
120 * Failed to register error interrupt. Not returning error as this would
121 * prevent application from enabling larger number of devs.
123 * This failure is a known issue because otx2_dev_init() initializes
124 * interrupts based on static values from ATF, and the actual number
125 * of interrupts needed (which is based on LFs) can be determined only
126 * after otx2_dev_init() sets up interrupts which includes mbox
/*
 * Configure and enable the instruction queue of one CPT queue pair.
 *
 * Programs, in order: the AF-side LF control register (engine group mask
 * and priority, read-modify-write via the mailbox AF reg accessors), the
 * IQ base address, the IQ size, the LF control enable, and finally the
 * INPROG register to start instruction execution.
 *
 * @dev         cryptodev (used for the AF register mailbox accessors)
 * @qp          queue pair whose IQ is configured (qp->base = LF BAR2,
 *              qp->iq_dma_addr = IOVA of the instruction queue)
 * @grp_mask    engine group mask written to AF_LF_CTL.grp
 * @pri         queue priority; any non-zero value is coerced to 1
 *
 * NOTE(review): elided chunk — the size_div40 parameter line, return
 * type, braces, error checks after the AF reg read/write, and the bit
 * sets on lf_ctl/inprog before write-back are not visible here.
 */
133 otx2_cpt_iq_enable(const struct rte_cryptodev *dev,
134 const struct otx2_cpt_qp *qp, uint8_t grp_mask, uint8_t pri,
137 union otx2_cpt_af_lf_ctl af_lf_ctl;
138 union otx2_cpt_lf_inprog inprog;
139 union otx2_cpt_lf_q_base base;
140 union otx2_cpt_lf_q_size size;
141 union otx2_cpt_lf_ctl lf_ctl;
144 /* Set engine group mask and priority */
/* Read-modify-write through the AF mailbox to preserve other fields. */
146 ret = otx2_cpt_af_reg_read(dev, OTX2_CPT_AF_LF_CTL(qp->id),
150 af_lf_ctl.s.grp = grp_mask;
151 af_lf_ctl.s.pri = pri ? 1 : 0;
152 ret = otx2_cpt_af_reg_write(dev, OTX2_CPT_AF_LF_CTL(qp->id),
157 /* Set instruction queue base address */
159 base.u = otx2_read64(qp->base + OTX2_CPT_LF_Q_BASE);
/* >>7 implies the addr field holds a 128-byte-aligned address — TODO
 * confirm against the Q_BASE register layout. */
162 base.s.addr = qp->iq_dma_addr >> 7;
163 otx2_write64(base.u, qp->base + OTX2_CPT_LF_Q_BASE);
165 /* Set instruction queue size */
167 size.u = otx2_read64(qp->base + OTX2_CPT_LF_Q_SIZE);
168 size.s.size_div40 = size_div40;
169 otx2_write64(size.u, qp->base + OTX2_CPT_LF_Q_SIZE);
171 /* Enable instruction queue */
173 lf_ctl.u = otx2_read64(qp->base + OTX2_CPT_LF_CTL);
175 otx2_write64(lf_ctl.u, qp->base + OTX2_CPT_LF_CTL);
177 /* Start instruction execution */
179 inprog.u = otx2_read64(qp->base + OTX2_CPT_LF_INPROG);
181 otx2_write64(inprog.u, qp->base + OTX2_CPT_LF_INPROG);
187 otx2_cpt_iq_disable(struct otx2_cpt_qp *qp)
189 union otx2_cpt_lf_q_grp_ptr grp_ptr;
190 union otx2_cpt_lf_inprog inprog;
191 union otx2_cpt_lf_ctl ctl;
194 /* Stop instruction execution */
195 inprog.u = otx2_read64(qp->base + OTX2_CPT_LF_INPROG);
197 otx2_write64(inprog.u, qp->base + OTX2_CPT_LF_INPROG);
199 /* Disable instructions enqueuing */
200 ctl.u = otx2_read64(qp->base + OTX2_CPT_LF_CTL);
202 otx2_write64(ctl.u, qp->base + OTX2_CPT_LF_CTL);
204 /* Wait for instruction queue to become empty */
207 inprog.u = otx2_read64(qp->base + OTX2_CPT_LF_INPROG);
208 if (inprog.s.grb_partial)
212 grp_ptr.u = otx2_read64(qp->base + OTX2_CPT_LF_Q_GRP_PTR);
213 } while ((cnt < 10) && (grp_ptr.s.nq_ptr != grp_ptr.s.dq_ptr));
217 inprog.u = otx2_read64(qp->base + OTX2_CPT_LF_INPROG);
218 if ((inprog.s.inflight == 0) &&
219 (inprog.s.gwb_cnt < 40) &&
220 ((inprog.s.grb_cnt == 0) || (inprog.s.grb_cnt == 40)))