1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* IRQ handler for the NPA LF error interrupt vector (param is the npa_lf).
 * NOTE(review): this listing is elided — return type, braces and some body
 * lines are not visible here; only the visible lines are documented. */
9 npa_err_irq(void *param)
11 struct npa_lf *lf = (struct npa_lf *)param;
/* Snapshot the currently pending error bits. */
14 intr = plt_read64(lf->base + NPA_LF_ERR_INT);
18 plt_err("Err_intr=0x%" PRIx64 "", intr);
/* Acknowledge: write back exactly the bits observed (W1C semantics —
 * presumably; confirm against the NPA register reference). */
21 plt_write64(intr, lf->base + NPA_LF_ERR_INT);
/* Register the NPA LF error interrupt handler and enable the HW interrupt.
 * Sequence: mask (ENA_W1C) -> register vector -> unmask (ENA_W1S), so no
 * interrupt can fire before the handler is in place.
 * NOTE(review): the declarations of vec/rc and the return statement are
 * elided in this view; rc is presumably returned to the caller. */
25 npa_register_err_irq(struct npa_lf *lf)
27 struct plt_intr_handle *handle = lf->intr_handle;
30 vec = lf->npa_msixoff + NPA_LF_INT_VEC_ERR_INT;
32 /* Clear err interrupt */
33 plt_write64(~0ull, lf->base + NPA_LF_ERR_INT_ENA_W1C);
34 /* Register err interrupt vector */
35 rc = dev_irq_register(handle, npa_err_irq, lf, vec);
37 /* Enable hw interrupt */
38 plt_write64(~0ull, lf->base + NPA_LF_ERR_INT_ENA_W1S);
/* Mirror of npa_register_err_irq(): mask the error interrupt in HW first,
 * then unregister the handler for the same MSI-X vector. */
44 npa_unregister_err_irq(struct npa_lf *lf)
46 struct plt_intr_handle *handle = lf->intr_handle;
49 vec = lf->npa_msixoff + NPA_LF_INT_VEC_ERR_INT;
51 /* Clear err interrupt */
52 plt_write64(~0ull, lf->base + NPA_LF_ERR_INT_ENA_W1C);
53 dev_irq_unregister(handle, npa_err_irq, lf, vec);
/* IRQ handler for the NPA LF RAS (poison) interrupt: read the pending
 * bits, log them, and write them back to acknowledge. Same pattern as
 * npa_err_irq() but on the NPA_LF_RAS register. */
57 npa_ras_irq(void *param)
59 struct npa_lf *lf = (struct npa_lf *)param;
62 intr = plt_read64(lf->base + NPA_LF_RAS);
66 plt_err("Ras_intr=0x%" PRIx64 "", intr);
/* Acknowledge only the bits we observed. */
69 plt_write64(intr, lf->base + NPA_LF_RAS);
/* Register the RAS/poison interrupt handler on the NPA_LF_INT_VEC_POISON
 * vector. Same mask -> register -> unmask ordering as the ERR path.
 * NOTE(review): vec/rc declarations and the return are elided here. */
73 npa_register_ras_irq(struct npa_lf *lf)
75 struct plt_intr_handle *handle = lf->intr_handle;
78 vec = lf->npa_msixoff + NPA_LF_INT_VEC_POISON;
80 /* Clear err interrupt */
81 plt_write64(~0ull, lf->base + NPA_LF_RAS_ENA_W1C);
82 /* Set used interrupt vectors */
83 rc = dev_irq_register(handle, npa_ras_irq, lf, vec);
84 /* Enable hw interrupt */
85 plt_write64(~0ull, lf->base + NPA_LF_RAS_ENA_W1S);
/* Mask the RAS interrupt in HW, then unregister the handler — mirror of
 * npa_register_ras_irq(). */
91 npa_unregister_ras_irq(struct npa_lf *lf)
94 struct plt_intr_handle *handle = lf->intr_handle;
96 vec = lf->npa_msixoff + NPA_LF_INT_VEC_POISON;
98 /* Clear err interrupt */
99 plt_write64(~0ull, lf->base + NPA_LF_RAS_ENA_W1C);
100 dev_irq_unregister(handle, npa_ras_irq, lf, vec);
/* Fetch and clear the per-queue interrupt status for queue 'q' through the
 * atomic op window at 'off' (NPA_LF_POOL_OP_INT / NPA_LF_AURA_OP_INT).
 * NOTE(review): the parameter list and the computation of 'qint' fall in
 * elided lines; presumably qint = reg & the caller-supplied mask — confirm
 * against the full source. */
103 static inline uint8_t
104 npa_q_irq_get_and_clear(struct npa_lf *lf, uint32_t q, uint32_t off,
/* Queue index is encoded at bit 44 of the op word. */
110 wdata = (uint64_t)q << 44;
/* Atomic read of the op register selects queue q and returns its status. */
111 reg = roc_atomic64_add_nosync(wdata, (int64_t *)(lf->base + off));
/* Bit 42 of the returned word signals the op itself failed. */
113 if (reg & BIT_ULL(42) /* OP_ERR */) {
114 plt_err("Failed execute irq get off=0x%x", off);
/* Write back queue-select plus the observed bits to clear them. */
120 plt_write64(wdata | qint, lf->base + off);
/* Read-and-clear the POOL error interrupt bits for pool 'p'.
 * ~0xff00 masks which status bits are cleared on write-back. */
125 static inline uint8_t
126 npa_pool_irq_get_and_clear(struct npa_lf *lf, uint32_t p)
128 return npa_q_irq_get_and_clear(lf, p, NPA_LF_POOL_OP_INT, ~0xff00);
/* Read-and-clear the AURA error interrupt bits for aura 'a' — same mask
 * and mechanism as the pool variant, on the aura op window. */
131 static inline uint8_t
132 npa_aura_irq_get_and_clear(struct npa_lf *lf, uint32_t a)
134 return npa_q_irq_get_and_clear(lf, a, NPA_LF_AURA_OP_INT, ~0xff00);
/* Queue-interrupt (QINT) handler. param is a per-vector npa_qint carrying
 * the owning lf and the qintx index. Scans all pools/auras mapped to this
 * qint, logs any per-queue error causes, then acknowledges the QINT line.
 * NOTE(review): loop bodies are elided in this view (the 'continue' after
 * each bitmap check and the final barrier/return are not visible). */
138 npa_q_irq(void *param)
140 struct npa_qint *qint = (struct npa_qint *)param;
141 struct npa_lf *lf = qint->lf;
142 uint8_t irq, qintx = qint->qintx;
143 uint32_t q, pool, aura;
/* Snapshot the pending QINT summary bits for this vector. */
146 intr = plt_read64(lf->base + NPA_LF_QINTX_INT(qintx));
150 plt_err("queue_intr=0x%" PRIx64 " qintx=%d", intr, qintx);
152 /* Handle pool queue interrupts */
153 for (q = 0; q < lf->nr_pools; q++) {
154 /* Skip disabled POOL */
/* A set bit in npa_bmp marks the queue as not in use. */
155 if (plt_bitmap_get(lf->npa_bmp, q))
158 pool = q % lf->qints;
159 irq = npa_pool_irq_get_and_clear(lf, pool);
161 if (irq & BIT_ULL(NPA_POOL_ERR_INT_OVFLS))
162 plt_err("Pool=%d NPA_POOL_ERR_INT_OVFLS", pool);
164 if (irq & BIT_ULL(NPA_POOL_ERR_INT_RANGE))
165 plt_err("Pool=%d NPA_POOL_ERR_INT_RANGE", pool);
167 if (irq & BIT_ULL(NPA_POOL_ERR_INT_PERR))
168 plt_err("Pool=%d NPA_POOL_ERR_INT_PERR", pool);
171 /* Handle aura queue interrupts */
172 for (q = 0; q < lf->nr_pools; q++) {
173 /* Skip disabled AURA */
174 if (plt_bitmap_get(lf->npa_bmp, q))
177 aura = q % lf->qints;
178 irq = npa_aura_irq_get_and_clear(lf, aura);
180 if (irq & BIT_ULL(NPA_AURA_ERR_INT_AURA_ADD_OVER))
181 plt_err("Aura=%d NPA_AURA_ERR_INT_ADD_OVER", aura);
183 if (irq & BIT_ULL(NPA_AURA_ERR_INT_AURA_ADD_UNDER))
184 plt_err("Aura=%d NPA_AURA_ERR_INT_ADD_UNDER", aura);
186 if (irq & BIT_ULL(NPA_AURA_ERR_INT_AURA_FREE_UNDER))
187 plt_err("Aura=%d NPA_AURA_ERR_INT_FREE_UNDER", aura);
189 if (irq & BIT_ULL(NPA_AURA_ERR_INT_POOL_DIS))
190 plt_err("Aura=%d NPA_AURA_ERR_POOL_DIS", aura);
193 /* Clear interrupt */
/* Acknowledge the summary bits observed at entry. */
194 plt_write64(intr, lf->base + NPA_LF_QINTX_INT(qintx));
/* Register one QINT vector per used qint: for each of min(qints, nr_pools)
 * vectors — reset the count register, mask the line, point a per-vector
 * npa_qint context at this lf/qintx, register the handler, clear any stale
 * state, then unmask. NOTE(review): the qintmem indexing/assignment lines
 * between 216 and 227, the memory barrier, and the error-exit path are
 * elided in this view. */
199 npa_register_queue_irqs(struct npa_lf *lf)
201 struct plt_intr_handle *handle = lf->intr_handle;
202 int vec, q, qs, rc = 0;
204 /* Figure out max qintx required */
205 qs = PLT_MIN(lf->qints, lf->nr_pools);
207 for (q = 0; q < qs; q++) {
208 vec = lf->npa_msixoff + NPA_LF_INT_VEC_QINT_START + q;
/* Reset the interrupt-count register before arming the line. */
211 plt_write64(0, lf->base + NPA_LF_QINTX_CNT(q));
213 /* Clear interrupt */
214 plt_write64(~0ull, lf->base + NPA_LF_QINTX_ENA_W1C(q));
216 struct npa_qint *qintmem = lf->npa_qint_mem;
223 /* Sync qints_mem update */
226 /* Register queue irq vector */
227 rc = dev_irq_register(handle, npa_q_irq, qintmem, vec);
/* Drop any stale count/pending state, then enable the line. */
231 plt_write64(0, lf->base + NPA_LF_QINTX_CNT(q));
232 plt_write64(0, lf->base + NPA_LF_QINTX_INT(q));
233 /* Enable QINT interrupt */
234 plt_write64(~0ull, lf->base + NPA_LF_QINTX_ENA_W1S(q));
/* Tear-down mirror of npa_register_queue_irqs(): for each used qint,
 * reset count/pending state, mask the line in HW, then unregister the
 * handler using the matching per-vector npa_qint context.
 * NOTE(review): vec/q/qs declarations and the qintmem indexing lines are
 * elided in this view. */
241 npa_unregister_queue_irqs(struct npa_lf *lf)
243 struct plt_intr_handle *handle = lf->intr_handle;
246 /* Figure out max qintx required */
247 qs = PLT_MIN(lf->qints, lf->nr_pools);
249 for (q = 0; q < qs; q++) {
250 vec = lf->npa_msixoff + NPA_LF_INT_VEC_QINT_START + q;
/* Reset count and pending bits before masking. */
253 plt_write64(0, lf->base + NPA_LF_QINTX_CNT(q));
254 plt_write64(0, lf->base + NPA_LF_QINTX_INT(q));
256 /* Clear interrupt */
257 plt_write64(~0ull, lf->base + NPA_LF_QINTX_ENA_W1C(q));
259 struct npa_qint *qintmem = lf->npa_qint_mem;
263 /* Unregister queue irq vector */
264 dev_irq_unregister(handle, npa_q_irq, qintmem, vec);
/* Register all NPA LF interrupts (ERR, RAS, per-queue QINTs).
 * Fails fast with NPA_ERR_PARAM when the MSI-X offset is invalid.
 * The three rc values are OR-ed, so a non-zero result means at least one
 * registration failed. NOTE(review): the rc declaration and final return
 * are elided in this view. */
272 npa_register_irqs(struct npa_lf *lf)
276 if (lf->npa_msixoff == MSIX_VECTOR_INVALID) {
277 plt_err("Invalid NPALF MSIX vector offset vector: 0x%x",
279 return NPA_ERR_PARAM;
282 /* Register lf err interrupt */
283 rc = npa_register_err_irq(lf);
284 /* Register RAS interrupt */
285 rc |= npa_register_ras_irq(lf);
286 /* Register queue interrupts */
287 rc |= npa_register_queue_irqs(lf);
/* Unregister every interrupt set up by npa_register_irqs(), in the same
 * order: ERR, RAS, then the per-queue QINT vectors. */
293 npa_unregister_irqs(struct npa_lf *lf)
295 npa_unregister_err_irq(lf);
296 npa_unregister_ras_irq(lf);
297 npa_unregister_queue_irqs(lf);