raw/ifpga/base: expose SEU error
[dpdk.git] / drivers / raw / ifpga / base / ifpga_fme_error.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2018 Intel Corporation
3  */
4
5 #include "ifpga_feature_dev.h"
6
7 static int fme_err_get_errors(struct ifpga_fme_hw *fme, u64 *val)
8 {
9         struct feature_fme_err *fme_err
10                 = get_fme_feature_ioaddr_by_index(fme,
11                                                   FME_FEATURE_ID_GLOBAL_ERR);
12         struct feature_fme_error0 fme_error0;
13
14         fme_error0.csr = readq(&fme_err->fme_err);
15         *val = fme_error0.csr;
16
17         return 0;
18 }
19
20 static int fme_err_get_first_error(struct ifpga_fme_hw *fme, u64 *val)
21 {
22         struct feature_fme_err *fme_err
23                 = get_fme_feature_ioaddr_by_index(fme,
24                                                   FME_FEATURE_ID_GLOBAL_ERR);
25         struct feature_fme_first_error fme_first_err;
26
27         fme_first_err.csr = readq(&fme_err->fme_first_err);
28         *val = fme_first_err.err_reg_status;
29
30         return 0;
31 }
32
33 static int fme_err_get_next_error(struct ifpga_fme_hw *fme, u64 *val)
34 {
35         struct feature_fme_err *fme_err
36                 = get_fme_feature_ioaddr_by_index(fme,
37                                                   FME_FEATURE_ID_GLOBAL_ERR);
38         struct feature_fme_next_error fme_next_err;
39
40         fme_next_err.csr = readq(&fme_err->fme_next_err);
41         *val = fme_next_err.err_reg_status;
42
43         return 0;
44 }
45
/* Clear the FME error0 / first-error / next-error registers.
 *
 * @val: the caller's snapshot of the error0 register; the clear is
 *       refused with -EBUSY if the register changed since the snapshot
 *       was taken (i.e. a new error arrived in the meantime).
 *
 * The whole sequence runs under fme->lock with all error0 bits masked,
 * so nothing is reported while status is being cleared; the default
 * mask is restored on every exit path.
 * Returns 0 on success, -EBUSY on a stale snapshot.
 */
static int fme_err_set_clear(struct ifpga_fme_hw *fme, u64 val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_error0 fme_error0;
	struct feature_fme_first_error fme_first_err;
	struct feature_fme_next_error fme_next_err;
	int ret = 0;

	spinlock_lock(&fme->lock);
	/* Mask every error0 source while clearing. */
	writeq(GENMASK_ULL(63, 0), &fme_err->fme_err_mask);

	/* Reject the clear if new errors appeared after the caller read. */
	fme_error0.csr = readq(&fme_err->fme_err);
	if (val != fme_error0.csr) {
		ret = -EBUSY;
		goto exit;
	}

	fme_first_err.csr = readq(&fme_err->fme_first_err);
	fme_next_err.csr = readq(&fme_err->fme_next_err);

	/* Write the observed bits back to clear them.
	 * NOTE(review): write-1-to-clear semantics inferred from this
	 * read-back-and-write pattern -- confirm with the FME register spec.
	 */
	writeq(fme_error0.csr, &fme_err->fme_err);
	writeq(fme_first_err.csr & FME_FIRST_ERROR_MASK,
	       &fme_err->fme_first_err);
	writeq(fme_next_err.csr & FME_NEXT_ERROR_MASK,
	       &fme_err->fme_next_err);

exit:
	/* Restore the default mask even on the error path. */
	writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
	spinlock_unlock(&fme->lock);

	return ret;
}
80
81 static int fme_err_get_revision(struct ifpga_fme_hw *fme, u64 *val)
82 {
83         struct feature_fme_err *fme_err
84                 = get_fme_feature_ioaddr_by_index(fme,
85                                                   FME_FEATURE_ID_GLOBAL_ERR);
86         struct feature_header header;
87
88         header.csr = readq(&fme_err->header);
89         *val = header.revision;
90
91         return 0;
92 }
93
94 static int fme_err_get_pcie0_errors(struct ifpga_fme_hw *fme, u64 *val)
95 {
96         struct feature_fme_err *fme_err
97                 = get_fme_feature_ioaddr_by_index(fme,
98                                                   FME_FEATURE_ID_GLOBAL_ERR);
99         struct feature_fme_pcie0_error pcie0_err;
100
101         pcie0_err.csr = readq(&fme_err->pcie0_err);
102         *val = pcie0_err.csr;
103
104         return 0;
105 }
106
/* Clear the PCIe0 error register.
 *
 * @val: the caller's snapshot of the PCIe0 error CSR; -EBUSY is
 *       returned and nothing is cleared if the register changed since.
 *
 * Runs under fme->lock with PCIe0 error reporting masked for the
 * duration; the mask is cleared again (0 == report all) before return.
 */
static int fme_err_set_pcie0_errors(struct ifpga_fme_hw *fme, u64 val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_pcie0_error pcie0_err;
	int ret = 0;

	spinlock_lock(&fme->lock);
	/* Mask reporting while the status bits are being cleared. */
	writeq(FME_PCIE0_ERROR_MASK, &fme_err->pcie0_err_mask);

	pcie0_err.csr = readq(&fme_err->pcie0_err);
	if (val != pcie0_err.csr)
		ret = -EBUSY;
	else
		/* Write the observed bits back to clear them.
		 * NOTE(review): W1C semantics inferred from the clear
		 * pattern used throughout this file -- confirm in spec.
		 */
		writeq(pcie0_err.csr & FME_PCIE0_ERROR_MASK,
		       &fme_err->pcie0_err);

	/* Unmask (0 == report everything). */
	writeq(0UL, &fme_err->pcie0_err_mask);
	spinlock_unlock(&fme->lock);

	return ret;
}
130
131 static int fme_err_get_pcie1_errors(struct ifpga_fme_hw *fme, u64 *val)
132 {
133         struct feature_fme_err *fme_err
134                 = get_fme_feature_ioaddr_by_index(fme,
135                                                   FME_FEATURE_ID_GLOBAL_ERR);
136         struct feature_fme_pcie1_error pcie1_err;
137
138         pcie1_err.csr = readq(&fme_err->pcie1_err);
139         *val = pcie1_err.csr;
140
141         return 0;
142 }
143
/* Clear the PCIe1 error register.
 *
 * @val: the caller's snapshot of the PCIe1 error CSR; -EBUSY is
 *       returned and nothing is cleared if the register changed since.
 *
 * Mirrors fme_err_set_pcie0_errors(): runs under fme->lock with PCIe1
 * error reporting masked, and unmasks (0 == report all) before return.
 */
static int fme_err_set_pcie1_errors(struct ifpga_fme_hw *fme, u64 val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_pcie1_error pcie1_err;
	int ret = 0;

	spinlock_lock(&fme->lock);
	/* Mask reporting while the status bits are being cleared. */
	writeq(FME_PCIE1_ERROR_MASK, &fme_err->pcie1_err_mask);

	pcie1_err.csr = readq(&fme_err->pcie1_err);
	if (val != pcie1_err.csr)
		ret = -EBUSY;
	else
		/* Write the observed bits back to clear them (W1C --
		 * see NOTE in fme_err_set_pcie0_errors).
		 */
		writeq(pcie1_err.csr & FME_PCIE1_ERROR_MASK,
		       &fme_err->pcie1_err);

	/* Unmask (0 == report everything). */
	writeq(0UL, &fme_err->pcie1_err_mask);
	spinlock_unlock(&fme->lock);

	return ret;
}
167
168 static int fme_err_get_nonfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
169 {
170         struct feature_fme_err *fme_err
171                 = get_fme_feature_ioaddr_by_index(fme,
172                                                   FME_FEATURE_ID_GLOBAL_ERR);
173         struct feature_fme_ras_nonfaterror ras_nonfaterr;
174
175         ras_nonfaterr.csr = readq(&fme_err->ras_nonfaterr);
176         *val = ras_nonfaterr.csr;
177
178         return 0;
179 }
180
181 static int fme_err_get_catfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
182 {
183         struct feature_fme_err *fme_err
184                 = get_fme_feature_ioaddr_by_index(fme,
185                                                   FME_FEATURE_ID_GLOBAL_ERR);
186         struct feature_fme_ras_catfaterror ras_catfaterr;
187
188         ras_catfaterr.csr = readq(&fme_err->ras_catfaterr);
189         *val = ras_catfaterr.csr;
190
191         return 0;
192 }
193
194 static int fme_err_get_inject_errors(struct ifpga_fme_hw *fme, u64 *val)
195 {
196         struct feature_fme_err *fme_err
197                 = get_fme_feature_ioaddr_by_index(fme,
198                                                   FME_FEATURE_ID_GLOBAL_ERR);
199         struct feature_fme_ras_error_inj ras_error_inj;
200
201         ras_error_inj.csr = readq(&fme_err->ras_error_inj);
202         *val = ras_error_inj.csr & FME_RAS_ERROR_INJ_MASK;
203
204         return 0;
205 }
206
207 static int fme_err_set_inject_errors(struct ifpga_fme_hw *fme, u64 val)
208 {
209         struct feature_fme_err *fme_err
210                 = get_fme_feature_ioaddr_by_index(fme,
211                                               FME_FEATURE_ID_GLOBAL_ERR);
212         struct feature_fme_ras_error_inj ras_error_inj;
213
214         spinlock_lock(&fme->lock);
215         ras_error_inj.csr = readq(&fme_err->ras_error_inj);
216
217         if (val <= FME_RAS_ERROR_INJ_MASK) {
218                 ras_error_inj.csr = val;
219         } else {
220                 spinlock_unlock(&fme->lock);
221                 return -EINVAL;
222         }
223
224         writeq(ras_error_inj.csr, &fme_err->ras_error_inj);
225         spinlock_unlock(&fme->lock);
226
227         return 0;
228 }
229
/* Arm FME error reporting.
 *
 * Sets the error0 mask to its default and fully clears the PCIe0,
 * PCIe1, RAS non-fatal and RAS catastrophic/fatal masks.
 * NOTE(review): mask polarity (0 == report everything) is inferred
 * from the clear sequences elsewhere in this file, which raise these
 * masks while clearing status -- confirm against the FME register spec.
 */
static void fme_error_enable(struct ifpga_fme_hw *fme)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_GLOBAL_ERR);

	writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
	writeq(0UL, &fme_err->pcie0_err_mask);
	writeq(0UL, &fme_err->pcie1_err_mask);
	writeq(0UL, &fme_err->ras_nonfat_mask);
	writeq(0UL, &fme_err->ras_catfat_mask);
}
242
243 static int fme_global_error_init(struct ifpga_feature *feature)
244 {
245         struct ifpga_fme_hw *fme = feature->parent;
246
247         fme_error_enable(fme);
248
249         if (feature->ctx_num)
250                 fme->capability |= FPGA_FME_CAP_ERR_IRQ;
251
252         return 0;
253 }
254
/* Teardown hook for the global error feature: nothing to undo. */
static void fme_global_error_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);
}
259
260 static int fme_err_check_seu(struct feature_fme_err *fme_err)
261 {
262         struct feature_fme_error_capability error_cap;
263
264         error_cap.csr = readq(&fme_err->fme_err_capability);
265
266         return error_cap.seu_support ? 1 : 0;
267 }
268
269 static int fme_err_get_seu_emr(struct ifpga_fme_hw *fme,
270                 u64 *val, bool high)
271 {
272         struct feature_fme_err *fme_err
273                 = get_fme_feature_ioaddr_by_index(fme,
274                                 FME_FEATURE_ID_GLOBAL_ERR);
275
276         if (!fme_err_check_seu(fme_err))
277                 return -ENODEV;
278
279         if (high)
280                 *val = readq(&fme_err->seu_emr_h);
281         else
282                 *val = readq(&fme_err->seu_emr_l);
283
284         return 0;
285 }
286
287 static int fme_err_fme_err_get_prop(struct ifpga_feature *feature,
288                                     struct feature_prop *prop)
289 {
290         struct ifpga_fme_hw *fme = feature->parent;
291         u16 id = GET_FIELD(PROP_ID, prop->prop_id);
292
293         switch (id) {
294         case 0x1: /* ERRORS */
295                 return fme_err_get_errors(fme, &prop->data);
296         case 0x2: /* FIRST_ERROR */
297                 return fme_err_get_first_error(fme, &prop->data);
298         case 0x3: /* NEXT_ERROR */
299                 return fme_err_get_next_error(fme, &prop->data);
300         case 0x5: /* SEU EMR LOW */
301                 return fme_err_get_seu_emr(fme, &prop->data, 0);
302         case 0x6: /* SEU EMR HIGH */
303                 return fme_err_get_seu_emr(fme, &prop->data, 1);
304         }
305
306         return -ENOENT;
307 }
308
309 static int fme_err_root_get_prop(struct ifpga_feature *feature,
310                                  struct feature_prop *prop)
311 {
312         struct ifpga_fme_hw *fme = feature->parent;
313         u16 id = GET_FIELD(PROP_ID, prop->prop_id);
314
315         switch (id) {
316         case 0x5: /* REVISION */
317                 return fme_err_get_revision(fme, &prop->data);
318         case 0x6: /* PCIE0_ERRORS */
319                 return fme_err_get_pcie0_errors(fme, &prop->data);
320         case 0x7: /* PCIE1_ERRORS */
321                 return fme_err_get_pcie1_errors(fme, &prop->data);
322         case 0x8: /* NONFATAL_ERRORS */
323                 return fme_err_get_nonfatal_errors(fme, &prop->data);
324         case 0x9: /* CATFATAL_ERRORS */
325                 return fme_err_get_catfatal_errors(fme, &prop->data);
326         case 0xa: /* INJECT_ERRORS */
327                 return fme_err_get_inject_errors(fme, &prop->data);
328         case 0xb: /* REVISION*/
329                 return fme_err_get_revision(fme, &prop->data);
330         }
331
332         return -ENOENT;
333 }
334
335 static int fme_global_error_get_prop(struct ifpga_feature *feature,
336                                      struct feature_prop *prop)
337 {
338         u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
339         u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
340
341         /* PROP_SUB is never used */
342         if (sub != PROP_SUB_UNUSED)
343                 return -ENOENT;
344
345         switch (top) {
346         case ERR_PROP_TOP_FME_ERR:
347                 return fme_err_fme_err_get_prop(feature, prop);
348         case ERR_PROP_TOP_UNUSED:
349                 return fme_err_root_get_prop(feature, prop);
350         }
351
352         return -ENOENT;
353 }
354
355 static int fme_err_fme_err_set_prop(struct ifpga_feature *feature,
356                                     struct feature_prop *prop)
357 {
358         struct ifpga_fme_hw *fme = feature->parent;
359         u16 id = GET_FIELD(PROP_ID, prop->prop_id);
360
361         switch (id) {
362         case 0x4: /* CLEAR */
363                 return fme_err_set_clear(fme, prop->data);
364         }
365
366         return -ENOENT;
367 }
368
369 static int fme_err_root_set_prop(struct ifpga_feature *feature,
370                                  struct feature_prop *prop)
371 {
372         struct ifpga_fme_hw *fme = feature->parent;
373         u16 id = GET_FIELD(PROP_ID, prop->prop_id);
374
375         switch (id) {
376         case 0x6: /* PCIE0_ERRORS */
377                 return fme_err_set_pcie0_errors(fme, prop->data);
378         case 0x7: /* PCIE1_ERRORS */
379                 return fme_err_set_pcie1_errors(fme, prop->data);
380         case 0xa: /* INJECT_ERRORS */
381                 return fme_err_set_inject_errors(fme, prop->data);
382         }
383
384         return -ENOENT;
385 }
386
387 static int fme_global_error_set_prop(struct ifpga_feature *feature,
388                                      struct feature_prop *prop)
389 {
390         u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
391         u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
392
393         /* PROP_SUB is never used */
394         if (sub != PROP_SUB_UNUSED)
395                 return -ENOENT;
396
397         switch (top) {
398         case ERR_PROP_TOP_FME_ERR:
399                 return fme_err_fme_err_set_prop(feature, prop);
400         case ERR_PROP_TOP_UNUSED:
401                 return fme_err_root_set_prop(feature, prop);
402         }
403
404         return -ENOENT;
405 }
406
407 static int fme_global_err_set_irq(struct ifpga_feature *feature, void *irq_set)
408 {
409         struct fpga_fme_err_irq_set *err_irq_set = irq_set;
410         struct ifpga_fme_hw *fme;
411         int ret;
412
413         fme = (struct ifpga_fme_hw *)feature->parent;
414
415         if (!(fme->capability & FPGA_FME_CAP_ERR_IRQ))
416                 return -ENODEV;
417
418         spinlock_lock(&fme->lock);
419         ret = fpga_msix_set_block(feature, 0, 1, &err_irq_set->evtfd);
420         spinlock_unlock(&fme->lock);
421
422         return ret;
423 }
424
/* Ops table wiring the FME global error feature into the generic
 * ifpga feature framework (see ifpga_feature_dev.h).
 */
struct ifpga_feature_ops fme_global_err_ops = {
	.init = fme_global_error_init,
	.uinit = fme_global_error_uinit,
	.get_prop = fme_global_error_get_prop,
	.set_prop = fme_global_error_set_prop,
	.set_irq = fme_global_err_set_irq,
};