1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
7 #include <rte_common.h>
8 #include <rte_bus_pci.h>
10 #include "otx2_common.h"
12 #include "otx2_mempool.h"
/* IRQ handler for NPA LF error interrupts: reads NPA_LF_ERR_INT, logs the
 * pending bits, and writes the same bits back to the register to acknowledge
 * them (write-1-to-clear convention, suggested by the *_W1C/*_W1S register
 * names used elsewhere in this file — TODO confirm against the HRM).
 * NOTE(review): this listing elides lines (return type, braces, and likely
 * an "intr == 0" early return) between the numbered lines shown.
 */
15 npa_lf_err_irq(void *param)
17 struct otx2_npa_lf *lf = (struct otx2_npa_lf *)param;
/* Read currently pending error interrupt bits. */
20 intr = otx2_read64(lf->base + NPA_LF_ERR_INT);
24 otx2_err("Err_intr=0x%" PRIx64 "", intr);
/* Ack exactly the bits observed above. */
27 otx2_write64(intr, lf->base + NPA_LF_ERR_INT);
/* Register and enable the NPA LF error interrupt.
 * Sequence: mask (W1C) -> register handler -> unmask (W1S), so no interrupt
 * fires before the handler is in place.
 * NOTE(review): the elided lines presumably declare vec/rc and "return rc;"
 * — confirm against the full file.
 */
31 npa_lf_register_err_irq(struct otx2_npa_lf *lf)
33 struct rte_intr_handle *handle = lf->intr_handle;
/* MSI-X vector = per-LF MSI-X base offset + fixed ERR_INT vector index. */
36 vec = lf->npa_msixoff + NPA_LF_INT_VEC_ERR_INT;
38 /* Clear err interrupt */
39 otx2_write64(~0ull, lf->base + NPA_LF_ERR_INT_ENA_W1C);
40 /* Register err interrupt vector */
41 rc = otx2_register_irq(handle, npa_lf_err_irq, lf, vec);
43 /* Enable hw interrupt */
44 otx2_write64(~0ull, lf->base + NPA_LF_ERR_INT_ENA_W1S);
/* Tear-down counterpart of npa_lf_register_err_irq: mask the error
 * interrupt in hardware (W1C) before unregistering the handler, so no
 * interrupt can arrive with the handler gone.
 */
50 npa_lf_unregister_err_irq(struct otx2_npa_lf *lf)
52 struct rte_intr_handle *handle = lf->intr_handle;
/* Same vector computation as the register path. */
55 vec = lf->npa_msixoff + NPA_LF_INT_VEC_ERR_INT;
57 /* Clear err interrupt */
58 otx2_write64(~0ull, lf->base + NPA_LF_ERR_INT_ENA_W1C);
59 otx2_unregister_irq(handle, npa_lf_err_irq, lf, vec);
/* IRQ handler for NPA LF RAS (poison/error-reporting) interrupts: reads
 * NPA_LF_RAS, logs the pending bits, and writes them back to acknowledge.
 * Mirrors npa_lf_err_irq; intermediate lines are elided in this listing.
 */
63 npa_lf_ras_irq(void *param)
65 struct otx2_npa_lf *lf = (struct otx2_npa_lf *)param;
/* Read currently pending RAS interrupt bits. */
68 intr = otx2_read64(lf->base + NPA_LF_RAS);
72 otx2_err("Ras_intr=0x%" PRIx64 "", intr);
/* Ack exactly the bits observed above. */
75 otx2_write64(intr, lf->base + NPA_LF_RAS);
/* Register and enable the NPA LF RAS (poison) interrupt.
 * Same mask -> register -> unmask sequence as npa_lf_register_err_irq.
 */
79 npa_lf_register_ras_irq(struct otx2_npa_lf *lf)
81 struct rte_intr_handle *handle = lf->intr_handle;
/* MSI-X vector = per-LF MSI-X base offset + fixed POISON vector index. */
84 vec = lf->npa_msixoff + NPA_LF_INT_VEC_POISON;
86 /* Clear err interrupt */
87 otx2_write64(~0ull, lf->base + NPA_LF_RAS_ENA_W1C);
88 /* Set used interrupt vectors */
89 rc = otx2_register_irq(handle, npa_lf_ras_irq, lf, vec);
90 /* Enable hw interrupt */
91 otx2_write64(~0ull, lf->base + NPA_LF_RAS_ENA_W1S);
/* Tear-down counterpart of npa_lf_register_ras_irq: mask the RAS interrupt
 * before unregistering the handler.
 */
97 npa_lf_unregister_ras_irq(struct otx2_npa_lf *lf)
100 struct rte_intr_handle *handle = lf->intr_handle;
/* Same vector computation as the register path. */
102 vec = lf->npa_msixoff + NPA_LF_INT_VEC_POISON;
104 /* Clear err interrupt */
105 otx2_write64(~0ull, lf->base + NPA_LF_RAS_ENA_W1C);
106 otx2_unregister_irq(handle, npa_lf_ras_irq, lf, vec);
/* Read-and-clear a per-queue interrupt status register via the hardware's
 * atomic-op interface, returning the interrupt byte for queue 'q'.
 * 'off' selects the OP_INT register (pool vs aura), 'mask' selects which
 * bits to clear on write-back.
 * NOTE(review): lines declaring wdata/reg/qint and computing 'qint' from
 * reg/mask are elided in this listing — the final write clearly uses a
 * 'qint' value not visible here; confirm against the full file.
 */
109 static inline uint8_t
110 npa_lf_q_irq_get_and_clear(struct otx2_npa_lf *lf, uint32_t q,
111 uint32_t off, uint64_t mask)
/* Queue index is encoded into bits [63:44] of the atomic-op word. */
116 wdata = (uint64_t)q << 44;
/* Atomic add-with-result against the MMIO register returns its contents. */
117 reg = otx2_atomic64_add_nosync(wdata, (int64_t *)(lf->base + off));
/* Bit 42 of the returned word flags an operation error. */
119 if (reg & BIT_ULL(42) /* OP_ERR */) {
120 otx2_err("Failed execute irq get off=0x%x", off);
/* Write back queue id + serviced bits to clear them. */
126 otx2_write64(wdata | qint, lf->base + off);
/* Fetch-and-clear pending interrupt bits for POOL 'p' via the POOL OP_INT
 * register. The ~0xff00 mask preserves the interrupt byte positions when
 * clearing — presumably bits [15:8] hold the interrupt flags; confirm
 * against the NPA register definitions.
 */
131 static inline uint8_t
132 npa_lf_pool_irq_get_and_clear(struct otx2_npa_lf *lf, uint32_t p)
134 return npa_lf_q_irq_get_and_clear(lf, p, NPA_LF_POOL_OP_INT, ~0xff00);
/* Fetch-and-clear pending interrupt bits for AURA 'a' via the AURA OP_INT
 * register; same mask convention as the POOL variant above.
 */
137 static inline uint8_t
138 npa_lf_aura_irq_get_and_clear(struct otx2_npa_lf *lf, uint32_t a)
140 return npa_lf_q_irq_get_and_clear(lf, a, NPA_LF_AURA_OP_INT, ~0xff00);
/* Per-QINT (queue interrupt) handler: walks every configured pool/aura,
 * fetches-and-clears its per-queue interrupt bits, logs each error cause,
 * then acks the QINT line and dumps mempool context for diagnosis.
 * NOTE(review): elided lines include the 'intr' declaration, a likely
 * "intr == 0" early return, and the loop-body 'continue' after the bitmap
 * skip checks — confirm against the full file.
 */
144 npa_lf_q_irq(void *param)
146 struct otx2_npa_qint *qint = (struct otx2_npa_qint *)param;
147 struct otx2_npa_lf *lf = qint->lf;
148 uint8_t irq, qintx = qint->qintx;
149 uint32_t q, pool, aura;
/* Read the aggregated interrupt status for this QINT line. */
152 intr = otx2_read64(lf->base + NPA_LF_QINTX_INT(qintx));
156 otx2_err("queue_intr=0x%" PRIx64 " qintx=%d", intr, qintx);
158 /* Handle pool queue interrupts */
159 for (q = 0; q < lf->nr_pools; q++) {
160 /* Skip disabled POOL */
/* A set bitmap bit marks the slot as free/disabled (inferred from the
 * "Skip disabled" comments — verify against the allocator code). */
161 if (rte_bitmap_get(lf->npa_bmp, q))
/* NOTE(review): 'q % lf->qints' folds the queue index into the QINT
 * range — looks like it should arguably be 'q' itself for the per-pool
 * OP_INT read; verify against upstream. Code left as-is. */
164 pool = q % lf->qints;
165 irq = npa_lf_pool_irq_get_and_clear(lf, pool);
/* Decode and log each pool error cause bit individually. */
167 if (irq & BIT_ULL(NPA_POOL_ERR_INT_OVFLS))
168 otx2_err("Pool=%d NPA_POOL_ERR_INT_OVFLS", pool);
170 if (irq & BIT_ULL(NPA_POOL_ERR_INT_RANGE))
171 otx2_err("Pool=%d NPA_POOL_ERR_INT_RANGE", pool);
173 if (irq & BIT_ULL(NPA_POOL_ERR_INT_PERR))
174 otx2_err("Pool=%d NPA_POOL_ERR_INT_PERR", pool);
177 /* Handle aura queue interrupts */
178 for (q = 0; q < lf->nr_pools; q++) {
180 /* Skip disabled AURA */
181 if (rte_bitmap_get(lf->npa_bmp, q))
184 aura = q % lf->qints;
185 irq = npa_lf_aura_irq_get_and_clear(lf, aura);
/* Decode and log each aura error cause bit individually. */
187 if (irq & BIT_ULL(NPA_AURA_ERR_INT_AURA_ADD_OVER))
188 otx2_err("Aura=%d NPA_AURA_ERR_INT_ADD_OVER", aura);
190 if (irq & BIT_ULL(NPA_AURA_ERR_INT_AURA_ADD_UNDER))
191 otx2_err("Aura=%d NPA_AURA_ERR_INT_ADD_UNDER", aura);
193 if (irq & BIT_ULL(NPA_AURA_ERR_INT_AURA_FREE_UNDER))
194 otx2_err("Aura=%d NPA_AURA_ERR_INT_FREE_UNDER", aura);
196 if (irq & BIT_ULL(NPA_AURA_ERR_INT_POOL_DIS))
197 otx2_err("Aura=%d NPA_AURA_ERR_POOL_DIS", aura);
200 /* Clear interrupt */
201 otx2_write64(intr, lf->base + NPA_LF_QINTX_INT(qintx));
/* Dump mempool/NPA context to the log to aid post-mortem debugging. */
202 otx2_mempool_ctx_dump(lf);
/* Register one IRQ handler per hardware QINT line (bounded by both the
 * number of QINT lines and the number of pools in use). For each line:
 * reset the counter, mask it, point a per-line otx2_npa_qint record at it,
 * register the handler with that record as context, then re-zero the
 * counter/status and unmask.
 * NOTE(review): elided lines presumably advance/initialize 'qintmem'
 * (lf/qintx fields), issue the barrier mentioned by the "Sync" comment,
 * handle rc on registration failure, and "return rc;" — confirm.
 */
206 npa_lf_register_queue_irqs(struct otx2_npa_lf *lf)
208 struct rte_intr_handle *handle = lf->intr_handle;
209 int vec, q, qs, rc = 0;
211 /* Figure out max qintx required */
212 qs = RTE_MIN(lf->qints, lf->nr_pools);
214 for (q = 0; q < qs; q++) {
/* QINT vectors are contiguous starting at QINT_START. */
215 vec = lf->npa_msixoff + NPA_LF_INT_VEC_QINT_START + q;
/* Reset the per-line interrupt counter before arming. */
218 otx2_write64(0, lf->base + NPA_LF_QINTX_CNT(q));
220 /* Clear interrupt */
221 otx2_write64(~0ull, lf->base + NPA_LF_QINTX_ENA_W1C(q));
/* Per-line context record handed to the handler as 'param'. */
223 struct otx2_npa_qint *qintmem = lf->npa_qint_mem;
229 /* Sync qints_mem update */
232 /* Register queue irq vector */
233 rc = otx2_register_irq(handle, npa_lf_q_irq, qintmem, vec);
/* Re-zero counter and status, then unmask the line. */
237 otx2_write64(0, lf->base + NPA_LF_QINTX_CNT(q));
238 otx2_write64(0, lf->base + NPA_LF_QINTX_INT(q));
239 /* Enable QINT interrupt */
240 otx2_write64(~0ull, lf->base + NPA_LF_QINTX_ENA_W1S(q));
/* Tear-down counterpart of npa_lf_register_queue_irqs: for each QINT line,
 * zero its counter/status, mask it (W1C), then unregister the handler with
 * the matching per-line context record.
 * NOTE(review): elided lines presumably declare vec/q/qs and advance
 * 'qintmem' to the q-th record before unregistering — confirm.
 */
247 npa_lf_unregister_queue_irqs(struct otx2_npa_lf *lf)
249 struct rte_intr_handle *handle = lf->intr_handle;
252 /* Figure out max qintx required */
/* Must mirror the bound used at registration time. */
253 qs = RTE_MIN(lf->qints, lf->nr_pools);
255 for (q = 0; q < qs; q++) {
256 vec = lf->npa_msixoff + NPA_LF_INT_VEC_QINT_START + q;
/* Quiesce the line before removing its handler. */
259 otx2_write64(0, lf->base + NPA_LF_QINTX_CNT(q));
260 otx2_write64(0, lf->base + NPA_LF_QINTX_INT(q));
262 /* Clear interrupt */
263 otx2_write64(~0ull, lf->base + NPA_LF_QINTX_ENA_W1C(q));
265 struct otx2_npa_qint *qintmem = lf->npa_qint_mem;
268 /* Unregister queue irq vector */
269 otx2_unregister_irq(handle, npa_lf_q_irq, qintmem, vec);
/* Public entry point: validate the LF's MSI-X offset, then register the
 * error, RAS, and per-queue interrupts. Return codes are OR-combined, so
 * the result is non-zero if any registration failed (individual failure
 * causes are not distinguishable to the caller).
 * NOTE(review): the error-path return after the otx2_err() and the final
 * "return rc;" are elided in this listing.
 */
277 otx2_npa_register_irqs(struct otx2_npa_lf *lf)
/* Guard: the AF must have assigned a valid MSI-X offset for this LF. */
281 if (lf->npa_msixoff == MSIX_VECTOR_INVALID) {
282 otx2_err("Invalid NPALF MSIX vector offset vector: 0x%x",
287 /* Register lf err interrupt */
288 rc = npa_lf_register_err_irq(lf);
289 /* Register RAS interrupt */
290 rc |= npa_lf_register_ras_irq(lf);
291 /* Register queue interrupts */
292 rc |= npa_lf_register_queue_irqs(lf);
/* Public entry point: unregister all NPA LF interrupts, mirroring the
 * registration order in otx2_npa_register_irqs.
 */
298 otx2_npa_unregister_irqs(struct otx2_npa_lf *lf)
300 npa_lf_unregister_err_irq(lf);
301 npa_lf_unregister_ras_irq(lf);
302 npa_lf_unregister_queue_irqs(lf);