net/octeontx2: handle queue specific error interrupts
drivers/net/octeontx2/otx2_ethdev_irq.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <inttypes.h>

#include <rte_bus_pci.h>

#include "otx2_ethdev.h"

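/* Handler for the NIX LF error interrupt vector: log the pending cause
 * bits from NIX_LF_ERR_INT and clear them by writing the value back.
 */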
static void
nix_lf_err_irq(void *param)
{
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        uint64_t intr;

        intr = otx2_read64(dev->base + NIX_LF_ERR_INT);
        if (intr == 0)
                return;

        otx2_err("Err_intr=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);

        /* Clear interrupt */
        otx2_write64(intr, dev->base + NIX_LF_ERR_INT);
}

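/* Register nix_lf_err_irq on the LF's error MSI-X vector
 * (nix_msixoff + NIX_LF_INT_VEC_ERR_INT) and enable all error
 * interrupt causes except RQ_DISABLED.
 */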
static int
nix_lf_register_err_irq(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct rte_intr_handle *handle = &pci_dev->intr_handle;
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        int rc, vec;

        vec = dev->nix_msixoff + NIX_LF_INT_VEC_ERR_INT;

        /* Clear err interrupt */
        otx2_write64(~0ull, dev->base + NIX_LF_ERR_INT_ENA_W1C);
        /* Register err interrupt handler */
        rc = otx2_register_irq(handle, nix_lf_err_irq, eth_dev, vec);
        /* Enable all dev interrupts except for RQ_DISABLED (bit 11) */
        otx2_write64(~BIT_ULL(11), dev->base + NIX_LF_ERR_INT_ENA_W1S);

        return rc;
}

static void
nix_lf_unregister_err_irq(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct rte_intr_handle *handle = &pci_dev->intr_handle;
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        int vec;

        vec = dev->nix_msixoff + NIX_LF_INT_VEC_ERR_INT;

        /* Clear err interrupt */
        otx2_write64(~0ull, dev->base + NIX_LF_ERR_INT_ENA_W1C);
        otx2_unregister_irq(handle, nix_lf_err_irq, eth_dev, vec);
}

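/* Handler for the NIX LF RAS (poison) interrupt vector: log the pending
 * cause bits from NIX_LF_RAS and clear them by writing the value back.
 */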
static void
nix_lf_ras_irq(void *param)
{
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        uint64_t intr;

        intr = otx2_read64(dev->base + NIX_LF_RAS);
        if (intr == 0)
                return;

        otx2_err("Ras_intr=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);

        /* Clear interrupt */
        otx2_write64(intr, dev->base + NIX_LF_RAS);
}

static int
nix_lf_register_ras_irq(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct rte_intr_handle *handle = &pci_dev->intr_handle;
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        int rc, vec;

        vec = dev->nix_msixoff + NIX_LF_INT_VEC_POISON;

        /* Clear RAS interrupt */
        otx2_write64(~0ull, dev->base + NIX_LF_RAS_ENA_W1C);
        /* Register RAS interrupt handler */
        rc = otx2_register_irq(handle, nix_lf_ras_irq, eth_dev, vec);
        /* Enable all RAS interrupt causes */
        otx2_write64(~0ull, dev->base + NIX_LF_RAS_ENA_W1S);

        return rc;
}

static void
nix_lf_unregister_ras_irq(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct rte_intr_handle *handle = &pci_dev->intr_handle;
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        int vec;

        vec = dev->nix_msixoff + NIX_LF_INT_VEC_POISON;

        /* Clear RAS interrupt */
        otx2_write64(~0ull, dev->base + NIX_LF_RAS_ENA_W1C);
        otx2_unregister_irq(handle, nix_lf_ras_irq, eth_dev, vec);
}

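/* Read the pending interrupt bits for queue 'q' through the atomic
 * LF_OP_INT register at offset 'off': the queue index is placed in the
 * upper bits of the operand (<< 44), bit 42 of the result flags an
 * operation error, the low byte carries the pending bits, and the
 * follow-up write (restricted by 'mask') clears the reported state.
 */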
static inline uint8_t
nix_lf_q_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t q,
                           uint32_t off, uint64_t mask)
{
        uint64_t reg, wdata;
        uint8_t qint;

        wdata = (uint64_t)q << 44;
        reg = otx2_atomic64_add_nosync(wdata, (int64_t *)(dev->base + off));

        if (reg & BIT_ULL(42) /* OP_ERR */) {
                otx2_err("Failed to execute irq get off=0x%x", off);
                return 0;
        }

        qint = reg & 0xff;
        wdata &= mask;
        otx2_write64(wdata, dev->base + off);

        return qint;
}

static inline uint8_t
nix_lf_rq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t rq)
{
        return nix_lf_q_irq_get_and_clear(dev, rq, NIX_LF_RQ_OP_INT, ~0xff00);
}

static inline uint8_t
nix_lf_cq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t cq)
{
        return nix_lf_q_irq_get_and_clear(dev, cq, NIX_LF_CQ_OP_INT, ~0xff00);
}

static inline uint8_t
nix_lf_sq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t sq)
{
        return nix_lf_q_irq_get_and_clear(dev, sq, NIX_LF_SQ_OP_INT, ~0x1ff00);
}

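/* Dump an SQ debug register: if the valid bit (44) is set, log the SQ
 * index and error code it reports.
 */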
static inline void
nix_lf_sq_debug_reg(struct otx2_eth_dev *dev, uint32_t off)
{
        uint64_t reg;

        reg = otx2_read64(dev->base + off);
        if (reg & BIT_ULL(44))
                otx2_err("SQ=%d err_code=0x%x",
                         (int)((reg >> 8) & 0xfffff), (uint8_t)(reg & 0xff));
}

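/* Handler for a queue interrupt (QINT) vector: scan the device's RQs,
 * CQs and SQs, read and clear their per-queue interrupt status, log any
 * error causes, then clear the QINT summary register NIX_LF_QINTX_INT.
 */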
static void
nix_lf_q_irq(void *param)
{
        struct otx2_qint *qint = (struct otx2_qint *)param;
        struct rte_eth_dev *eth_dev = qint->eth_dev;
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        uint8_t irq, qintx = qint->qintx;
        int q, cq, rq, sq;
        uint64_t intr;

        intr = otx2_read64(dev->base + NIX_LF_QINTX_INT(qintx));
        if (intr == 0)
                return;

        otx2_err("Queue_intr=0x%" PRIx64 " qintx=%d pf=%d, vf=%d",
                 intr, qintx, dev->pf, dev->vf);

        /* Handle RQ interrupts */
        for (q = 0; q < eth_dev->data->nb_rx_queues; q++) {
                rq = q % dev->qints;
                irq = nix_lf_rq_irq_get_and_clear(dev, rq);

                if (irq & BIT_ULL(NIX_RQINT_DROP))
                        otx2_err("RQ=%d NIX_RQINT_DROP", rq);

                if (irq & BIT_ULL(NIX_RQINT_RED))
                        otx2_err("RQ=%d NIX_RQINT_RED", rq);
        }

        /* Handle CQ interrupts */
        for (q = 0; q < eth_dev->data->nb_rx_queues; q++) {
                cq = q % dev->qints;
                irq = nix_lf_cq_irq_get_and_clear(dev, cq);

                if (irq & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
                        otx2_err("CQ=%d NIX_CQERRINT_DOOR_ERR", cq);

                if (irq & BIT_ULL(NIX_CQERRINT_WR_FULL))
                        otx2_err("CQ=%d NIX_CQERRINT_WR_FULL", cq);

                if (irq & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
                        otx2_err("CQ=%d NIX_CQERRINT_CQE_FAULT", cq);
        }

        /* Handle SQ interrupts */
        for (q = 0; q < eth_dev->data->nb_tx_queues; q++) {
                sq = q % dev->qints;
                irq = nix_lf_sq_irq_get_and_clear(dev, sq);

                if (irq & BIT_ULL(NIX_SQINT_LMT_ERR)) {
                        otx2_err("SQ=%d NIX_SQINT_LMT_ERR", sq);
                        nix_lf_sq_debug_reg(dev, NIX_LF_SQ_OP_ERR_DBG);
                }
                if (irq & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
                        otx2_err("SQ=%d NIX_SQINT_MNQ_ERR", sq);
                        nix_lf_sq_debug_reg(dev, NIX_LF_MNQ_ERR_DBG);
                }
                if (irq & BIT_ULL(NIX_SQINT_SEND_ERR)) {
                        otx2_err("SQ=%d NIX_SQINT_SEND_ERR", sq);
                        nix_lf_sq_debug_reg(dev, NIX_LF_SEND_ERR_DBG);
                }
                if (irq & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) {
                        otx2_err("SQ=%d NIX_SQINT_SQB_ALLOC_FAIL", sq);
                        nix_lf_sq_debug_reg(dev, NIX_LF_SEND_ERR_DBG);
                }
        }

        /* Clear interrupt */
        otx2_write64(intr, dev->base + NIX_LF_QINTX_INT(qintx));
}

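/* Register one nix_lf_q_irq handler per queue interrupt (QINT) line. The
 * number of QINTs used is the larger of the Rx and Tx queue counts,
 * capped at the dev->qints supported by the LF; each vector has its
 * counter and pending bits cleared before being enabled.
 */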
int
oxt2_nix_register_queue_irqs(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct rte_intr_handle *handle = &pci_dev->intr_handle;
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        int vec, q, sqs, rqs, qs, rc = 0;

        /* Figure out max qintx required */
        rqs = RTE_MIN(dev->qints, eth_dev->data->nb_rx_queues);
        sqs = RTE_MIN(dev->qints, eth_dev->data->nb_tx_queues);
        qs  = RTE_MAX(rqs, sqs);

        dev->configured_qints = qs;

        for (q = 0; q < qs; q++) {
                vec = dev->nix_msixoff + NIX_LF_INT_VEC_QINT_START + q;

                /* Clear QINT CNT */
                otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q));

                /* Clear interrupt */
                otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1C(q));

                dev->qints_mem[q].eth_dev = eth_dev;
                dev->qints_mem[q].qintx = q;

                /* Sync qints_mem update */
                rte_smp_wmb();

                /* Register queue irq vector */
                rc = otx2_register_irq(handle, nix_lf_q_irq,
                                       &dev->qints_mem[q], vec);
                if (rc)
                        break;

                otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q));
                otx2_write64(0, dev->base + NIX_LF_QINTX_INT(q));
                /* Enable QINT interrupt */
                otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1S(q));
        }

        return rc;
}

void
oxt2_nix_unregister_queue_irqs(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct rte_intr_handle *handle = &pci_dev->intr_handle;
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        int vec, q;

        for (q = 0; q < dev->configured_qints; q++) {
                vec = dev->nix_msixoff + NIX_LF_INT_VEC_QINT_START + q;

                /* Clear QINT CNT */
                otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q));
                otx2_write64(0, dev->base + NIX_LF_QINTX_INT(q));

                /* Clear interrupt */
                otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1C(q));

                /* Unregister queue irq vector */
                otx2_unregister_irq(handle, nix_lf_q_irq,
                                    &dev->qints_mem[q], vec);
        }
}

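/* Register the LF error and RAS interrupts. Queue (QINT) interrupts are
 * registered separately via oxt2_nix_register_queue_irqs(), which needs
 * the configured queue counts.
 */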
int
otx2_nix_register_irqs(struct rte_eth_dev *eth_dev)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        int rc;

        if (dev->nix_msixoff == MSIX_VECTOR_INVALID) {
                otx2_err("Invalid NIXLF MSIX vector offset: 0x%x",
                         dev->nix_msixoff);
                return -EINVAL;
        }

        /* Register lf err interrupt */
        rc = nix_lf_register_err_irq(eth_dev);
        /* Register RAS interrupt */
        rc |= nix_lf_register_ras_irq(eth_dev);

        return rc;
}

void
otx2_nix_unregister_irqs(struct rte_eth_dev *eth_dev)
{
        nix_lf_unregister_err_irq(eth_dev);
        nix_lf_unregister_ras_irq(eth_dev);
}