/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

#define WORK_LIMIT 1000

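/* Drain work from the inline device SSO GGRP: issue a GET_WORK request,
 * poll the GWS tag/WQE registers until the hardware clears bit 63 of the
 * first word, and hand any returned work to the registered callback (or
 * warn if none is set). At most WORK_LIMIT items are processed per call.
 */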
static void
nix_inl_sso_work_cb(struct nix_inl_dev *inl_dev)
{
        uintptr_t getwrk_op = inl_dev->ssow_base + SSOW_LF_GWS_OP_GET_WORK0;
        uintptr_t tag_wqe_op = inl_dev->ssow_base + SSOW_LF_GWS_WQE0;
        uint32_t wdata = BIT(16) | 1;
        union {
                __uint128_t get_work;
                uint64_t u64[2];
        } gw;
        uint16_t cnt = 0;
        uint64_t work;

again:
        /* Try to do get work */
        gw.get_work = wdata;
        plt_write64(gw.u64[0], getwrk_op);
        do {
                roc_load_pair(gw.u64[0], gw.u64[1], tag_wqe_op);
        } while (gw.u64[0] & BIT_ULL(63));

        work = gw.u64[1];
        /* Do we have any work? */
        if (work) {
                if (inl_dev->work_cb)
                        inl_dev->work_cb(gw.u64, inl_dev->cb_args);
                else
                        plt_warn("Undelivered inl dev work gw0: %p gw1: %p",
                                 (void *)gw.u64[0], (void *)gw.u64[1]);
                cnt++;
                if (cnt < WORK_LIMIT)
                        goto again;
        }

        plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
}

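/* Dump the inline device NIX LF general, Rx/Tx stat and interrupt
 * registers for debugging.
 */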
static int
nix_inl_nix_reg_dump(struct nix_inl_dev *inl_dev)
{
        uintptr_t nix_base = inl_dev->nix_base;

        /* General registers */
        nix_lf_gen_reg_dump(nix_base, NULL);

        /* Rx, Tx stat registers */
        nix_lf_stat_reg_dump(nix_base, NULL, inl_dev->lf_tx_stats,
                             inl_dev->lf_rx_stats);

        /* Intr registers */
        nix_lf_int_reg_dump(nix_base, NULL, inl_dev->qints, inl_dev->cints);

        return 0;
}

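/* SSO GGRP 0 interrupt handler: drain pending work on a work-executable
 * interrupt (bit 1), report any other cause and clear the interrupt.
 */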
static void
nix_inl_sso_hwgrp_irq(void *param)
{
        struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
        uintptr_t sso_base = inl_dev->sso_base;
        uint64_t intr;

        intr = plt_read64(sso_base + SSO_LF_GGRP_INT);
        if (intr == 0)
                return;

        /* Check for work executable interrupt */
        if (intr & BIT(1))
                nix_inl_sso_work_cb(inl_dev);

        if (intr & ~BIT(1))
                plt_err("GGRP 0 GGRP_INT=0x%" PRIx64 "", intr);

        /* Clear interrupt */
        plt_write64(intr, sso_base + SSO_LF_GGRP_INT);
}

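/* SSOW GWS 0 interrupt handler: report the interrupt cause and clear it. */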
static void
nix_inl_sso_hws_irq(void *param)
{
        struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
        uintptr_t ssow_base = inl_dev->ssow_base;
        uint64_t intr;

        intr = plt_read64(ssow_base + SSOW_LF_GWS_INT);
        if (intr == 0)
                return;

        plt_err("GWS 0 GWS_INT=0x%" PRIx64 "", intr);

        /* Clear interrupt */
        plt_write64(intr, ssow_base + SSOW_LF_GWS_INT);
}

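/* Register and enable the SSOW GWS and SSO GGRP interrupts for the inline
 * device, and program the GGRP threshold so pending work raises a
 * time-counter based interrupt.
 */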
int
nix_inl_sso_register_irqs(struct nix_inl_dev *inl_dev)
{
        struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
        uintptr_t ssow_base = inl_dev->ssow_base;
        uintptr_t sso_base = inl_dev->sso_base;
        uint16_t sso_msixoff, ssow_msixoff;
        int rc;

        ssow_msixoff = inl_dev->ssow_msixoff;
        sso_msixoff = inl_dev->sso_msixoff;
        if (sso_msixoff == MSIX_VECTOR_INVALID ||
            ssow_msixoff == MSIX_VECTOR_INVALID) {
                plt_err("Invalid SSO/SSOW MSIX offsets (0x%x, 0x%x)",
                        sso_msixoff, ssow_msixoff);
                return -EINVAL;
        }

        /*
         * Setup SSOW interrupt
         */

        /* Clear SSOW interrupt enable */
        plt_write64(~0ull, ssow_base + SSOW_LF_GWS_INT_ENA_W1C);
        /* Register interrupt with vfio */
        rc = dev_irq_register(handle, nix_inl_sso_hws_irq, inl_dev,
                              ssow_msixoff + SSOW_LF_INT_VEC_IOP);
        /* Set SSOW interrupt enable */
        plt_write64(~0ull, ssow_base + SSOW_LF_GWS_INT_ENA_W1S);

        /*
         * Setup SSO/HWGRP interrupt
         */

        /* Clear SSO interrupt enable */
        plt_write64(~0ull, sso_base + SSO_LF_GGRP_INT_ENA_W1C);
        /* Register IRQ */
        rc |= dev_irq_register(handle, nix_inl_sso_hwgrp_irq, (void *)inl_dev,
                               sso_msixoff + SSO_LF_INT_VEC_GRP);
        /* Enable hw interrupt */
        plt_write64(~0ull, sso_base + SSO_LF_GGRP_INT_ENA_W1S);

        /* Setup threshold for work exec interrupt to 100us timeout
         * based on time counter.
         */
        plt_write64(BIT_ULL(63) | 10ULL << 48, sso_base + SSO_LF_GGRP_INT_THR);

        return rc;
}

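/* Disable the SSOW/SSO interrupts, clear the GGRP threshold and
 * unregister both interrupt handlers.
 */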
void
nix_inl_sso_unregister_irqs(struct nix_inl_dev *inl_dev)
{
        struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
        uintptr_t ssow_base = inl_dev->ssow_base;
        uintptr_t sso_base = inl_dev->sso_base;
        uint16_t sso_msixoff, ssow_msixoff;

        ssow_msixoff = inl_dev->ssow_msixoff;
        sso_msixoff = inl_dev->sso_msixoff;

        /* Clear SSOW interrupt enable */
        plt_write64(~0ull, ssow_base + SSOW_LF_GWS_INT_ENA_W1C);
        /* Clear SSO/HWGRP interrupt enable */
        plt_write64(~0ull, sso_base + SSO_LF_GGRP_INT_ENA_W1C);
        /* Clear SSO threshold */
        plt_write64(0, sso_base + SSO_LF_GGRP_INT_THR);

        /* Unregister IRQ */
        dev_irq_unregister(handle, nix_inl_sso_hws_irq, (void *)inl_dev,
                           ssow_msixoff + SSOW_LF_INT_VEC_IOP);
        dev_irq_unregister(handle, nix_inl_sso_hwgrp_irq, (void *)inl_dev,
                           sso_msixoff + SSO_LF_INT_VEC_GRP);
}

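/* NIX LF QINT 0 interrupt handler: read and clear the RQ 0 interrupt
 * cause, report DROP/RED events, then dump the LF registers and RQ 0
 * context for debugging.
 */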
static void
nix_inl_nix_q_irq(void *param)
{
        struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
        uintptr_t nix_base = inl_dev->nix_base;
        struct dev *dev = &inl_dev->dev;
        volatile void *ctx;
        uint64_t reg, intr;
        uint8_t irq;
        int rc;

        intr = plt_read64(nix_base + NIX_LF_QINTX_INT(0));
        if (intr == 0)
                return;

        plt_err("Queue_intr=0x%" PRIx64 " qintx 0 pf=%d, vf=%d", intr, dev->pf,
                dev->vf);

        /* Get and clear RQ0 interrupt */
        reg = roc_atomic64_add_nosync(0,
                                      (int64_t *)(nix_base + NIX_LF_RQ_OP_INT));
        if (reg & BIT_ULL(42) /* OP_ERR */) {
                plt_err("Failed to get rq_int");
                return;
        }
        irq = reg & 0xff;
        plt_write64(0 | irq, nix_base + NIX_LF_RQ_OP_INT);

        if (irq & BIT_ULL(NIX_RQINT_DROP))
                plt_err("RQ=0 NIX_RQINT_DROP");

        if (irq & BIT_ULL(NIX_RQINT_RED))
                plt_err("RQ=0 NIX_RQINT_RED");

        /* Clear interrupt */
        plt_write64(intr, nix_base + NIX_LF_QINTX_INT(0));

        /* Dump registers to std out */
        nix_inl_nix_reg_dump(inl_dev);

        /* Dump RQ 0 */
        rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, 0, &ctx);
        if (rc) {
                plt_err("Failed to get rq context");
                return;
        }
        nix_lf_rq_dump(ctx);
}

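/* NIX LF RAS interrupt handler: report and clear the RAS interrupt, then
 * dump the LF registers and RQ 0 context for debugging.
 */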
static void
nix_inl_nix_ras_irq(void *param)
{
        struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
        uintptr_t nix_base = inl_dev->nix_base;
        struct dev *dev = &inl_dev->dev;
        volatile void *ctx;
        uint64_t intr;
        int rc;

        intr = plt_read64(nix_base + NIX_LF_RAS);
        if (intr == 0)
                return;

        plt_err("Ras_intr=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);
        /* Clear interrupt */
        plt_write64(intr, nix_base + NIX_LF_RAS);

        /* Dump registers to std out */
        nix_inl_nix_reg_dump(inl_dev);

        /* Dump RQ 0 */
        rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, 0, &ctx);
        if (rc) {
                plt_err("Failed to get rq context");
                return;
        }
        nix_lf_rq_dump(ctx);
}

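/* NIX LF error interrupt handler: report and clear the error interrupt,
 * then dump the LF registers and RQ 0 context for debugging.
 */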
static void
nix_inl_nix_err_irq(void *param)
{
        struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
        uintptr_t nix_base = inl_dev->nix_base;
        struct dev *dev = &inl_dev->dev;
        volatile void *ctx;
        uint64_t intr;
        int rc;

        intr = plt_read64(nix_base + NIX_LF_ERR_INT);
        if (intr == 0)
                return;

        plt_err("Err_irq=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);

        /* Clear interrupt */
        plt_write64(intr, nix_base + NIX_LF_ERR_INT);

        /* Dump registers to std out */
        nix_inl_nix_reg_dump(inl_dev);

        /* Dump RQ 0 */
        rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, 0, &ctx);
        if (rc) {
                plt_err("Failed to get rq context");
                return;
        }
        nix_lf_rq_dump(ctx);
}

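/* Register the NIX LF error, RAS and QINT 0 interrupt handlers for the
 * inline device and enable the corresponding hardware interrupts (all
 * error interrupts except RQ_DISABLED and CQ_DISABLED).
 */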
int
nix_inl_nix_register_irqs(struct nix_inl_dev *inl_dev)
{
        struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
        uintptr_t nix_base = inl_dev->nix_base;
        uint16_t msixoff;
        int rc;

        msixoff = inl_dev->nix_msixoff;
        if (msixoff == MSIX_VECTOR_INVALID) {
                plt_err("Invalid NIXLF MSIX vector offset: 0x%x", msixoff);
                return -EINVAL;
        }

        /* Disable err interrupts */
        plt_write64(~0ull, nix_base + NIX_LF_ERR_INT_ENA_W1C);
        /* Disable RAS interrupts */
        plt_write64(~0ull, nix_base + NIX_LF_RAS_ENA_W1C);

        /* Register err irq */
        rc = dev_irq_register(handle, nix_inl_nix_err_irq, inl_dev,
                              msixoff + NIX_LF_INT_VEC_ERR_INT);
        rc |= dev_irq_register(handle, nix_inl_nix_ras_irq, inl_dev,
                               msixoff + NIX_LF_INT_VEC_POISON);

        /* Enable all nix lf error irqs except RQ_DISABLED and CQ_DISABLED */
        plt_write64(~(BIT_ULL(11) | BIT_ULL(24)),
                    nix_base + NIX_LF_ERR_INT_ENA_W1S);
        /* Enable RAS interrupts */
        plt_write64(~0ull, nix_base + NIX_LF_RAS_ENA_W1S);

        /* Setup queue irq for RQ 0 */

        /* Clear QINT CNT, interrupt */
        plt_write64(0, nix_base + NIX_LF_QINTX_CNT(0));
        plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(0));

        /* Register queue irq vector */
        rc |= dev_irq_register(handle, nix_inl_nix_q_irq, inl_dev,
                               msixoff + NIX_LF_INT_VEC_QINT_START);

        plt_write64(0, nix_base + NIX_LF_QINTX_CNT(0));
        plt_write64(0, nix_base + NIX_LF_QINTX_INT(0));
        /* Enable QINT interrupt */
        plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1S(0));

        return rc;
}

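/* Disable the NIX LF error, RAS and QINT 0 interrupts and unregister the
 * corresponding interrupt handlers.
 */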
void
nix_inl_nix_unregister_irqs(struct nix_inl_dev *inl_dev)
{
        struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
        uintptr_t nix_base = inl_dev->nix_base;
        uint16_t msixoff;

        msixoff = inl_dev->nix_msixoff;
        /* Disable err interrupts */
        plt_write64(~0ull, nix_base + NIX_LF_ERR_INT_ENA_W1C);
        /* Disable RAS interrupts */
        plt_write64(~0ull, nix_base + NIX_LF_RAS_ENA_W1C);

        dev_irq_unregister(handle, nix_inl_nix_err_irq, inl_dev,
                           msixoff + NIX_LF_INT_VEC_ERR_INT);
        dev_irq_unregister(handle, nix_inl_nix_ras_irq, inl_dev,
                           msixoff + NIX_LF_INT_VEC_POISON);

        /* Clear QINT CNT */
        plt_write64(0, nix_base + NIX_LF_QINTX_CNT(0));
        plt_write64(0, nix_base + NIX_LF_QINTX_INT(0));

        /* Disable QINT interrupt */
        plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(0));

        /* Unregister queue irq vector */
        dev_irq_unregister(handle, nix_inl_nix_q_irq, inl_dev,
                           msixoff + NIX_LF_INT_VEC_QINT_START);
}