1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
8 #define WORK_LIMIT 1000
/* Drain pending work from the inline dev's SSO GWS (get-work slot) and
 * deliver each entry to the registered work callback.
 * NOTE(review): this chunk is an incomplete extraction - the storage
 * class/return type, local declarations (e.g. 'gw'), the get-work loop
 * framing and the closing braces are not visible here.  Comments below
 * describe only what the visible lines demonstrate.
 */
11 nix_inl_sso_work_cb(struct nix_inl_dev *inl_dev)
/* SSOW GWS GET_WORK0 op register address for this LF */
13 uintptr_t getwrk_op = inl_dev->ssow_base + SSOW_LF_GWS_OP_GET_WORK0;
/* SSOW GWS WQE0 register address - the tag/WQE pair is read back here */
14 uintptr_t tag_wqe_op = inl_dev->ssow_base + SSOW_LF_GWS_WQE0;
/* GET_WORK request word; BIT(16) | 1 - presumably wait/group-select
 * flags, TODO confirm against the SSOW_LF_GWS_OP_GET_WORK0 layout.
 */
15 uint32_t wdata = BIT(16) | 1;
24 /* Try to do get work */
26 plt_write64(gw.u64[0], getwrk_op);
/* Poll the tag/WQE pair until hardware clears the pend bit (bit 63) */
28 roc_load_pair(gw.u64[0], gw.u64[1], tag_wqe_op);
29 } while (gw.u64[0] & BIT_ULL(63));
32 /* Do we have any work? */
/* Hand the tag/WQE pair to the consumer callback */
35 inl_dev->work_cb(gw.u64, inl_dev->cb_args, false);
/* No consumer for this work item - log it and drop it */
37 plt_warn("Undelivered inl dev work gw0: %p gw1: %p",
38 (void *)gw.u64[0], (void *)gw.u64[1]);
/* Full fence so work processing is globally visible before returning */
44 plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
/* Dump this inline dev's NIX LF registers (general, Rx/Tx stats and
 * interrupt registers) for debugging.
 * NOTE(review): incomplete extraction - return type, braces and some
 * interior lines are not visible in this chunk.
 */
48 nix_inl_nix_reg_dump(struct nix_inl_dev *inl_dev)
50 uintptr_t nix_base = inl_dev->nix_base;
52 /* General registers */
53 nix_lf_gen_reg_dump(nix_base, NULL);
55 /* Rx, Tx stat registers */
56 nix_lf_stat_reg_dump(nix_base, NULL, inl_dev->lf_tx_stats,
57 inl_dev->lf_rx_stats);
/* Interrupt registers; NULL second arg presumably means "log only, no
 * output buffer", matching the other dump helpers - TODO confirm.
 */
60 nix_lf_int_reg_dump(nix_base, NULL, inl_dev->qints, inl_dev->cints);
/* IRQ handler for the SSO HWGRP (GGRP 0): read GGRP_INT, drain pending
 * work when the work-executable cause fired, then ack the interrupt.
 * NOTE(review): incomplete extraction - return type, the 'intr'
 * declaration, branch framing and closing brace are not visible here.
 */
66 nix_inl_sso_hwgrp_irq(void *param)
68 struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
69 uintptr_t sso_base = inl_dev->sso_base;
/* Read pending interrupt cause bits */
72 intr = plt_read64(sso_base + SSO_LF_GGRP_INT);
76 /* Check for work executable interrupt */
78 nix_inl_sso_work_cb(inl_dev);
/* Unexpected cause - log the raw interrupt word */
81 plt_err("GGRP 0 GGRP_INT=0x%" PRIx64 "", intr);
/* Ack: write the serviced cause bits back to clear them */
84 plt_write64(intr, sso_base + SSO_LF_GGRP_INT);
/* IRQ handler for the SSO HWS (GWS 0): log and clear GWS_INT.
 * NOTE(review): incomplete extraction - return type, the 'intr'
 * declaration and braces are not visible in this chunk.
 */
88 nix_inl_sso_hws_irq(void *param)
90 struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
91 uintptr_t ssow_base = inl_dev->ssow_base;
/* Read pending interrupt cause bits */
94 intr = plt_read64(ssow_base + SSOW_LF_GWS_INT);
98 plt_err("GWS 0 GWS_INT=0x%" PRIx64 "", intr);
100 /* Clear interrupt */
101 plt_write64(intr, ssow_base + SSOW_LF_GWS_INT);
/* Register the SSOW (HWS) and SSO (HWGRP) IRQ handlers for the inline
 * dev and enable the corresponding hardware interrupts.  Interrupts are
 * masked (W1C) before each handler is registered and re-enabled (W1S)
 * after, so no IRQ can fire into an unregistered handler.
 * NOTE(review): incomplete extraction - return type, the 'rc'
 * declaration, the error-path return and closing braces are not visible
 * in this chunk.
 */
105 nix_inl_sso_register_irqs(struct nix_inl_dev *inl_dev)
107 struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
108 uintptr_t ssow_base = inl_dev->ssow_base;
109 uintptr_t sso_base = inl_dev->sso_base;
110 uint16_t sso_msixoff, ssow_msixoff;
113 ssow_msixoff = inl_dev->ssow_msixoff;
114 sso_msixoff = inl_dev->sso_msixoff;
/* Both MSI-X vector offsets must have been probed successfully */
115 if (sso_msixoff == MSIX_VECTOR_INVALID ||
116 ssow_msixoff == MSIX_VECTOR_INVALID) {
117 plt_err("Invalid SSO/SSOW MSIX offsets (0x%x, 0x%x)",
118 sso_msixoff, ssow_msixoff);
123 /* Setup SSOW interrupt */
126 /* Clear SSOW interrupt enable */
127 plt_write64(~0ull, ssow_base + SSOW_LF_GWS_INT_ENA_W1C);
128 /* Register interrupt with vfio */
129 rc = dev_irq_register(handle, nix_inl_sso_hws_irq, inl_dev,
130 ssow_msixoff + SSOW_LF_INT_VEC_IOP);
131 /* Set SSOW interrupt enable */
132 plt_write64(~0ull, ssow_base + SSOW_LF_GWS_INT_ENA_W1S);
135 /* Setup SSO/HWGRP interrupt */
138 /* Clear SSO interrupt enable */
139 plt_write64(~0ull, sso_base + SSO_LF_GGRP_INT_ENA_W1C);
141 rc |= dev_irq_register(handle, nix_inl_sso_hwgrp_irq, (void *)inl_dev,
142 sso_msixoff + SSO_LF_INT_VEC_GRP);
143 /* Enable hw interrupt */
144 plt_write64(~0ull, sso_base + SSO_LF_GGRP_INT_ENA_W1S);
146 /* Setup threshold for work exec interrupt to 100us timeout
147 * based on time counter.  BIT_ULL(63) presumably arms the
 * threshold and 10ULL << 48 is the time-counter value - TODO
 * confirm against the SSO_LF_GGRP_INT_THR register layout.
 */
149 plt_write64(BIT_ULL(63) | 10ULL << 48, sso_base + SSO_LF_GGRP_INT_THR);
/* Tear down the SSO/SSOW IRQs: mask both interrupt sources, clear the
 * work-exec threshold, then unregister both handlers (mirror image of
 * nix_inl_sso_register_irqs()).
 * NOTE(review): incomplete extraction - return type and braces are not
 * visible in this chunk.
 */
155 nix_inl_sso_unregister_irqs(struct nix_inl_dev *inl_dev)
157 struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
158 uintptr_t ssow_base = inl_dev->ssow_base;
159 uintptr_t sso_base = inl_dev->sso_base;
160 uint16_t sso_msixoff, ssow_msixoff;
162 ssow_msixoff = inl_dev->ssow_msixoff;
163 sso_msixoff = inl_dev->sso_msixoff;
165 /* Clear SSOW interrupt enable */
166 plt_write64(~0ull, ssow_base + SSOW_LF_GWS_INT_ENA_W1C);
167 /* Clear SSO/HWGRP interrupt enable */
168 plt_write64(~0ull, sso_base + SSO_LF_GGRP_INT_ENA_W1C);
169 /* Clear SSO threshold */
170 plt_write64(0, sso_base + SSO_LF_GGRP_INT_THR);
/* Unregister both handlers only after their sources are masked */
173 dev_irq_unregister(handle, nix_inl_sso_hws_irq, (void *)inl_dev,
174 ssow_msixoff + SSOW_LF_INT_VEC_IOP);
175 dev_irq_unregister(handle, nix_inl_sso_hwgrp_irq, (void *)inl_dev,
176 sso_msixoff + SSO_LF_INT_VEC_GRP);
/* Per-QINT IRQ handler for the inline dev's NIX LF: log the cause,
 * read-and-ack RQ interrupts on every RQ, ack the QINT, then dump
 * registers and RQ contexts for diagnosis.
 * NOTE(review): incomplete extraction - return type, declarations of
 * 'intr', 'irq', 'reg', 'wdata', 'q', 'rc', 'ctx', several branch/brace
 * lines and the function tail are not visible in this chunk.
 */
180 nix_inl_nix_q_irq(void *param)
/* param is the per-QINT context installed at registration time */
182 struct nix_inl_qint *qints_mem = (struct nix_inl_qint *)param;
183 struct nix_inl_dev *inl_dev = qints_mem->inl_dev;
184 uintptr_t nix_base = inl_dev->nix_base;
185 struct dev *dev = &inl_dev->dev;
186 uint16_t qint = qints_mem->qint;
/* Read pending cause bits for this QINT */
193 intr = plt_read64(nix_base + NIX_LF_QINTX_INT(qint));
197 plt_err("Queue_intr=0x%" PRIx64 " qintx 0 pf=%d, vf=%d", intr, dev->pf,
200 /* Handle RQ interrupts */
201 for (q = 0; q < inl_dev->nb_rqs; q++) {
202 /* Get and clear RQ interrupts */
/* RQ index goes in bits [63:44] of the atomic op word - presumably
 * per the NIX_LF_RQ_OP_INT encoding, TODO confirm.
 */
203 wdata = (uint64_t)q << 44;
/* Atomic add-with-result reads the RQ's interrupt status */
204 reg = roc_atomic64_add_nosync(wdata,
205 (int64_t *)(nix_base + NIX_LF_RQ_OP_INT));
206 if (reg & BIT_ULL(42) /* OP_ERR */) {
207 plt_err("Failed to get rq_int");
/* Write the observed cause bits back to clear them */
211 plt_write64(wdata | irq, nix_base + NIX_LF_RQ_OP_INT);
213 if (irq & BIT_ULL(NIX_RQINT_DROP))
214 plt_err("RQ=0 NIX_RQINT_DROP");
216 if (irq & BIT_ULL(NIX_RQINT_RED))
217 plt_err("RQ=0 NIX_RQINT_RED");
220 /* Clear interrupt */
221 plt_write64(intr, nix_base + NIX_LF_QINTX_INT(qint));
223 /* Dump registers to std out */
224 nix_inl_nix_reg_dump(inl_dev);
/* Also dump every RQ's hardware context for post-mortem analysis */
227 for (q = 0; q < inl_dev->nb_rqs; q++) {
228 rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
230 plt_err("Failed to get rq %d context, rc=%d", q, rc);
/* RAS (poison) IRQ handler for the inline dev's NIX LF: log and clear
 * NIX_LF_RAS, then dump registers and RQ contexts for diagnosis.
 * NOTE(review): incomplete extraction - return type, declarations of
 * 'intr', 'q', 'rc', 'ctx', braces and the loop body tail are not
 * visible in this chunk.
 */
240 struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
241 uintptr_t nix_base = inl_dev->nix_base;
242 struct dev *dev = &inl_dev->dev;
/* Read pending RAS cause bits */
247 intr = plt_read64(nix_base + NIX_LF_RAS);
251 plt_err("Ras_intr=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);
252 /* Clear interrupt */
253 plt_write64(intr, nix_base + NIX_LF_RAS);
255 /* Dump registers to std out */
256 nix_inl_nix_reg_dump(inl_dev);
/* Also dump every RQ's hardware context for post-mortem analysis */
259 for (q = 0; q < inl_dev->nb_rqs; q++) {
260 rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
262 plt_err("Failed to get rq %d context, rc=%d", q, rc);
270 nix_inl_nix_err_irq(void *param)
272 struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
273 uintptr_t nix_base = inl_dev->nix_base;
274 struct dev *dev = &inl_dev->dev;
279 intr = plt_read64(nix_base + NIX_LF_ERR_INT);
283 plt_err("Err_irq=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);
285 /* Clear interrupt */
286 plt_write64(intr, nix_base + NIX_LF_ERR_INT);
288 /* Dump registers to std out */
289 nix_inl_nix_reg_dump(inl_dev);
292 for (q = 0; q < inl_dev->nb_rqs; q++) {
293 rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
295 plt_err("Failed to get rq %d context, rc=%d", q, rc);
/* Register all NIX LF IRQs for the inline dev: the ERR and RAS (poison)
 * vectors, plus one QINT vector per configured queue interrupt.  Each
 * source is masked (W1C) before its handler is registered and enabled
 * (W1S) after.  Allocates the per-QINT context array stored in
 * inl_dev->qints_mem (freed by nix_inl_nix_unregister_irqs()).
 * NOTE(review): incomplete extraction - return type, declarations of
 * 'rc', 'ret', 'q', 'qints', 'msixoff', the error-path returns, the
 * qints_mem NULL check's 'if' line and closing braces are not visible
 * in this chunk.
 */
303 nix_inl_nix_register_irqs(struct nix_inl_dev *inl_dev)
305 struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
306 uintptr_t nix_base = inl_dev->nix_base;
307 struct nix_inl_qint *qints_mem;
/* MSI-X vector offset must have been probed successfully */
312 msixoff = inl_dev->nix_msixoff;
313 if (msixoff == MSIX_VECTOR_INVALID) {
314 plt_err("Invalid NIXLF MSIX vector offset: 0x%x", msixoff);
318 /* Disable err interrupts */
319 plt_write64(~0ull, nix_base + NIX_LF_ERR_INT_ENA_W1C);
320 /* Disable RAS interrupts */
321 plt_write64(~0ull, nix_base + NIX_LF_RAS_ENA_W1C);
323 /* Register err irq */
324 rc = dev_irq_register(handle, nix_inl_nix_err_irq, inl_dev,
325 msixoff + NIX_LF_INT_VEC_ERR_INT);
/* Register RAS irq on the poison vector */
326 rc |= dev_irq_register(handle, nix_inl_nix_ras_irq, inl_dev,
327 msixoff + NIX_LF_INT_VEC_POISON);
329 /* Enable all nix lf error irqs except RQ_DISABLED and CQ_DISABLED */
330 plt_write64(~(BIT_ULL(11) | BIT_ULL(24)),
331 nix_base + NIX_LF_ERR_INT_ENA_W1S);
332 /* Enable RAS interrupts */
333 plt_write64(~0ull, nix_base + NIX_LF_RAS_ENA_W1S);
335 /* Setup queue irq for RQ's */
/* One QINT per RQ, capped by the number of QINTs the LF provides */
336 qints = PLT_MIN(inl_dev->nb_rqs, inl_dev->qints);
337 qints_mem = plt_zmalloc(sizeof(struct nix_inl_qint) * qints, 0);
339 plt_err("Failed to allocate memory for %u qints", qints);
/* Remember count and array so unregister can walk and free them */
343 inl_dev->configured_qints = qints;
344 inl_dev->qints_mem = qints_mem;
346 for (q = 0; q < qints; q++) {
347 /* Clear QINT CNT, interrupt */
348 plt_write64(0, nix_base + NIX_LF_QINTX_CNT(q));
349 plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(q));
351 /* Register queue irq vector */
352 ret = dev_irq_register(handle, nix_inl_nix_q_irq, &qints_mem[q],
353 msixoff + NIX_LF_INT_VEC_QINT_START + q);
/* Re-clear CNT/INT before enabling so no stale cause fires */
357 plt_write64(0, nix_base + NIX_LF_QINTX_CNT(q));
358 plt_write64(0, nix_base + NIX_LF_QINTX_INT(q));
359 /* Enable QINT interrupt */
360 plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1S(q));
/* Fill the per-QINT context handed to nix_inl_nix_q_irq() */
362 qints_mem[q].inl_dev = inl_dev;
363 qints_mem[q].qint = q;
/* Tear down all NIX LF IRQs for the inline dev: mask ERR/RAS sources,
 * unregister their handlers, then disable and unregister every QINT
 * vector and free the per-QINT context array (mirror image of
 * nix_inl_nix_register_irqs()).
 * NOTE(review): incomplete extraction - return type, declarations of
 * 'q' and 'msixoff', and closing braces are not visible in this chunk.
 */
371 nix_inl_nix_unregister_irqs(struct nix_inl_dev *inl_dev)
373 struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
374 struct nix_inl_qint *qints_mem = inl_dev->qints_mem;
375 uintptr_t nix_base = inl_dev->nix_base;
379 msixoff = inl_dev->nix_msixoff;
380 /* Disable err interrupts */
381 plt_write64(~0ull, nix_base + NIX_LF_ERR_INT_ENA_W1C);
382 /* Disable RAS interrupts */
383 plt_write64(~0ull, nix_base + NIX_LF_RAS_ENA_W1C);
385 dev_irq_unregister(handle, nix_inl_nix_err_irq, inl_dev,
386 msixoff + NIX_LF_INT_VEC_ERR_INT);
387 dev_irq_unregister(handle, nix_inl_nix_ras_irq, inl_dev,
388 msixoff + NIX_LF_INT_VEC_POISON);
/* Walk every QINT that register set up (configured_qints) */
390 for (q = 0; q < inl_dev->configured_qints; q++) {
/* Clear QINT count and any latched cause */
392 plt_write64(0, nix_base + NIX_LF_QINTX_CNT(q));
393 plt_write64(0, nix_base + NIX_LF_QINTX_INT(q));
395 /* Disable QINT interrupt */
396 plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(q));
398 /* Unregister queue irq vector */
399 dev_irq_unregister(handle, nix_inl_nix_q_irq, &qints_mem[q],
400 msixoff + NIX_LF_INT_VEC_QINT_START + q);
/* Release the per-QINT contexts and drop the stale pointer */
403 plt_free(inl_dev->qints_mem);
404 inl_dev->qints_mem = NULL;