/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include "ifpga_feature_dev.h"

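/* Read back the raw FME_ERROR0 status CSR. */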
static int fme_err_get_errors(struct ifpga_fme_hw *fme, u64 *val)
{
        struct feature_fme_err *fme_err
                = get_fme_feature_ioaddr_by_index(fme,
                                                  FME_FEATURE_ID_GLOBAL_ERR);
        struct feature_fme_error0 fme_error0;

        fme_error0.csr = readq(&fme_err->fme_err);
        *val = fme_error0.csr;

        return 0;
}

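/*
 * Read the error status field of the FME_FIRST_ERROR CSR, which records
 * the first error observed by the hardware.
 */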
static int fme_err_get_first_error(struct ifpga_fme_hw *fme, u64 *val)
{
        struct feature_fme_err *fme_err
                = get_fme_feature_ioaddr_by_index(fme,
                                                  FME_FEATURE_ID_GLOBAL_ERR);
        struct feature_fme_first_error fme_first_err;

        fme_first_err.csr = readq(&fme_err->fme_first_err);
        *val = fme_first_err.err_reg_status;

        return 0;
}

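/*
 * Read the error status field of the FME_NEXT_ERROR CSR, which records
 * errors seen after the first one.
 */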
static int fme_err_get_next_error(struct ifpga_fme_hw *fme, u64 *val)
{
        struct feature_fme_err *fme_err
                = get_fme_feature_ioaddr_by_index(fme,
                                                  FME_FEATURE_ID_GLOBAL_ERR);
        struct feature_fme_next_error fme_next_err;

        fme_next_err.csr = readq(&fme_err->fme_next_err);
        *val = fme_next_err.err_reg_status;

        return 0;
}

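/*
 * Clear the FME error state.  The caller passes in the error value it last
 * read; if FME_ERROR0 no longer matches (new errors were logged in the
 * meantime), return -EBUSY instead of clearing.  Error reporting is masked
 * while the clear is in flight, and the logged values are written back to
 * their CSRs to clear them.
 */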
static int fme_err_set_clear(struct ifpga_fme_hw *fme, u64 val)
{
        struct feature_fme_err *fme_err
                = get_fme_feature_ioaddr_by_index(fme,
                                                  FME_FEATURE_ID_GLOBAL_ERR);
        struct feature_fme_error0 fme_error0;
        struct feature_fme_first_error fme_first_err;
        struct feature_fme_next_error fme_next_err;
        int ret = 0;

        spinlock_lock(&fme->lock);
        writeq(FME_ERROR0_MASK, &fme_err->fme_err_mask);

        fme_error0.csr = readq(&fme_err->fme_err);
        if (val != fme_error0.csr) {
                ret = -EBUSY;
                goto exit;
        }

        fme_first_err.csr = readq(&fme_err->fme_first_err);
        fme_next_err.csr = readq(&fme_err->fme_next_err);

        writeq(fme_error0.csr & FME_ERROR0_MASK, &fme_err->fme_err);
        writeq(fme_first_err.csr & FME_FIRST_ERROR_MASK,
               &fme_err->fme_first_err);
        writeq(fme_next_err.csr & FME_NEXT_ERROR_MASK,
               &fme_err->fme_next_err);

exit:
        writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
        spinlock_unlock(&fme->lock);

        return ret;
}

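/* Report the revision field of the global error feature header. */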
static int fme_err_get_revision(struct ifpga_fme_hw *fme, u64 *val)
{
        struct feature_fme_err *fme_err
                = get_fme_feature_ioaddr_by_index(fme,
                                                  FME_FEATURE_ID_GLOBAL_ERR);
        struct feature_header header;

        header.csr = readq(&fme_err->header);
        *val = header.revision;

        return 0;
}

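/* Read back the raw PCIe0 error status CSR. */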
static int fme_err_get_pcie0_errors(struct ifpga_fme_hw *fme, u64 *val)
{
        struct feature_fme_err *fme_err
                = get_fme_feature_ioaddr_by_index(fme,
                                                  FME_FEATURE_ID_GLOBAL_ERR);
        struct feature_fme_pcie0_error pcie0_err;

        pcie0_err.csr = readq(&fme_err->pcie0_err);
        *val = pcie0_err.csr;

        return 0;
}

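/*
 * Clear PCIe0 errors: mask reporting, check that the caller still sees the
 * current error state (-EBUSY otherwise), write the logged bits back to
 * clear them, then unmask.
 */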
static int fme_err_set_pcie0_errors(struct ifpga_fme_hw *fme, u64 val)
{
        struct feature_fme_err *fme_err
                = get_fme_feature_ioaddr_by_index(fme,
                                                  FME_FEATURE_ID_GLOBAL_ERR);
        struct feature_fme_pcie0_error pcie0_err;
        int ret = 0;

        spinlock_lock(&fme->lock);
        writeq(FME_PCIE0_ERROR_MASK, &fme_err->pcie0_err_mask);

        pcie0_err.csr = readq(&fme_err->pcie0_err);
        if (val != pcie0_err.csr)
                ret = -EBUSY;
        else
                writeq(pcie0_err.csr & FME_PCIE0_ERROR_MASK,
                       &fme_err->pcie0_err);

        writeq(0UL, &fme_err->pcie0_err_mask);
        spinlock_unlock(&fme->lock);

        return ret;
}

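/* Read back the raw PCIe1 error status CSR. */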
static int fme_err_get_pcie1_errors(struct ifpga_fme_hw *fme, u64 *val)
{
        struct feature_fme_err *fme_err
                = get_fme_feature_ioaddr_by_index(fme,
                                                  FME_FEATURE_ID_GLOBAL_ERR);
        struct feature_fme_pcie1_error pcie1_err;

        pcie1_err.csr = readq(&fme_err->pcie1_err);
        *val = pcie1_err.csr;

        return 0;
}

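/* Clear PCIe1 errors, following the same sequence as for PCIe0. */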
static int fme_err_set_pcie1_errors(struct ifpga_fme_hw *fme, u64 val)
{
        struct feature_fme_err *fme_err
                = get_fme_feature_ioaddr_by_index(fme,
                                                  FME_FEATURE_ID_GLOBAL_ERR);
        struct feature_fme_pcie1_error pcie1_err;
        int ret = 0;

        spinlock_lock(&fme->lock);
        writeq(FME_PCIE1_ERROR_MASK, &fme_err->pcie1_err_mask);

        pcie1_err.csr = readq(&fme_err->pcie1_err);
        if (val != pcie1_err.csr)
                ret = -EBUSY;
        else
                writeq(pcie1_err.csr & FME_PCIE1_ERROR_MASK,
                       &fme_err->pcie1_err);

        writeq(0UL, &fme_err->pcie1_err_mask);
        spinlock_unlock(&fme->lock);

        return ret;
}

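/* Read back the RAS non-fatal error status CSR. */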
static int fme_err_get_nonfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
{
        struct feature_fme_err *fme_err
                = get_fme_feature_ioaddr_by_index(fme,
                                                  FME_FEATURE_ID_GLOBAL_ERR);
        struct feature_fme_ras_nonfaterror ras_nonfaterr;

        ras_nonfaterr.csr = readq(&fme_err->ras_nonfaterr);
        *val = ras_nonfaterr.csr;

        return 0;
}

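/* Read back the RAS catastrophic/fatal error status CSR. */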
static int fme_err_get_catfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
{
        struct feature_fme_err *fme_err
                = get_fme_feature_ioaddr_by_index(fme,
                                                  FME_FEATURE_ID_GLOBAL_ERR);
        struct feature_fme_ras_catfaterror ras_catfaterr;

        ras_catfaterr.csr = readq(&fme_err->ras_catfaterr);
        *val = ras_catfaterr.csr;

        return 0;
}

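/* Report which error injections are currently requested. */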
static int fme_err_get_inject_errors(struct ifpga_fme_hw *fme, u64 *val)
{
        struct feature_fme_err *fme_err
                = get_fme_feature_ioaddr_by_index(fme,
                                                  FME_FEATURE_ID_GLOBAL_ERR);
        struct feature_fme_ras_error_inj ras_error_inj;

        ras_error_inj.csr = readq(&fme_err->ras_error_inj);
        *val = ras_error_inj.csr & FME_RAS_ERROR_INJ_MASK;

        return 0;
}

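/*
 * Request error injection.  Only the bits covered by FME_RAS_ERROR_INJ_MASK
 * are valid injection requests; any other bit set in val is rejected with
 * -EINVAL.
 */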
static int fme_err_set_inject_errors(struct ifpga_fme_hw *fme, u64 val)
{
        struct feature_fme_err *fme_err
                = get_fme_feature_ioaddr_by_index(fme,
                                                  FME_FEATURE_ID_GLOBAL_ERR);
        struct feature_fme_ras_error_inj ras_error_inj;

        spinlock_lock(&fme->lock);
        ras_error_inj.csr = readq(&fme_err->ras_error_inj);

        if (val <= FME_RAS_ERROR_INJ_MASK) {
                ras_error_inj.csr = val;
        } else {
                spinlock_unlock(&fme->lock);
                return -EINVAL;
        }

        writeq(ras_error_inj.csr, &fme_err->ras_error_inj);
        spinlock_unlock(&fme->lock);

        return 0;
}

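/*
 * Enable error reporting: restore the default FME_ERROR0 mask and unmask
 * the PCIe and RAS error sources.
 */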
static void fme_error_enable(struct ifpga_fme_hw *fme)
{
        struct feature_fme_err *fme_err
                = get_fme_feature_ioaddr_by_index(fme,
                                                  FME_FEATURE_ID_GLOBAL_ERR);

        writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
        writeq(0UL, &fme_err->pcie0_err_mask);
        writeq(0UL, &fme_err->pcie1_err_mask);
        writeq(0UL, &fme_err->ras_nonfat_mask);
        writeq(0UL, &fme_err->ras_catfat_mask);
}

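/*
 * Feature init: enable error reporting, and advertise error-interrupt
 * capability if interrupt contexts were assigned to this feature.
 */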
static int fme_global_error_init(struct ifpga_feature *feature)
{
        struct ifpga_fme_hw *fme = feature->parent;

        fme_error_enable(fme);

        if (feature->ctx_num)
                fme->capability |= FPGA_FME_CAP_ERR_IRQ;

        return 0;
}

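/* Nothing to tear down for the global error feature. */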
static void fme_global_error_uinit(struct ifpga_feature *feature)
{
        UNUSED(feature);
}

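/* Property reads in the ERR_PROP_TOP_FME_ERR group. */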
static int fme_err_fme_err_get_prop(struct ifpga_feature *feature,
                                    struct feature_prop *prop)
{
        struct ifpga_fme_hw *fme = feature->parent;
        u16 id = GET_FIELD(PROP_ID, prop->prop_id);

        switch (id) {
        case 0x1: /* ERRORS */
                return fme_err_get_errors(fme, &prop->data);
        case 0x2: /* FIRST_ERROR */
                return fme_err_get_first_error(fme, &prop->data);
        case 0x3: /* NEXT_ERROR */
                return fme_err_get_next_error(fme, &prop->data);
        }

        return -ENOENT;
}

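/* Property reads at the root (ERR_PROP_TOP_UNUSED) level. */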
static int fme_err_root_get_prop(struct ifpga_feature *feature,
                                 struct feature_prop *prop)
{
        struct ifpga_fme_hw *fme = feature->parent;
        u16 id = GET_FIELD(PROP_ID, prop->prop_id);

        switch (id) {
        case 0x5: /* REVISION */
                return fme_err_get_revision(fme, &prop->data);
        case 0x6: /* PCIE0_ERRORS */
                return fme_err_get_pcie0_errors(fme, &prop->data);
        case 0x7: /* PCIE1_ERRORS */
                return fme_err_get_pcie1_errors(fme, &prop->data);
        case 0x8: /* NONFATAL_ERRORS */
                return fme_err_get_nonfatal_errors(fme, &prop->data);
        case 0x9: /* CATFATAL_ERRORS */
                return fme_err_get_catfatal_errors(fme, &prop->data);
        case 0xa: /* INJECT_ERRORS */
                return fme_err_get_inject_errors(fme, &prop->data);
        case 0xb: /* REVISION */
                return fme_err_get_revision(fme, &prop->data);
        }

        return -ENOENT;
}

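/*
 * Dispatch a property read.  The PROP_TOP field of prop_id selects the
 * property group; the PROP_SUB field is unused for this feature.
 */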
static int fme_global_error_get_prop(struct ifpga_feature *feature,
                                     struct feature_prop *prop)
{
        u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
        u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);

        /* PROP_SUB is never used */
        if (sub != PROP_SUB_UNUSED)
                return -ENOENT;

        switch (top) {
        case ERR_PROP_TOP_FME_ERR:
                return fme_err_fme_err_get_prop(feature, prop);
        case ERR_PROP_TOP_UNUSED:
                return fme_err_root_get_prop(feature, prop);
        }

        return -ENOENT;
}

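/* Property writes in the ERR_PROP_TOP_FME_ERR group. */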
static int fme_err_fme_err_set_prop(struct ifpga_feature *feature,
                                    struct feature_prop *prop)
{
        struct ifpga_fme_hw *fme = feature->parent;
        u16 id = GET_FIELD(PROP_ID, prop->prop_id);

        switch (id) {
        case 0x4: /* CLEAR */
                return fme_err_set_clear(fme, prop->data);
        }

        return -ENOENT;
}

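/* Property writes at the root (ERR_PROP_TOP_UNUSED) level. */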
static int fme_err_root_set_prop(struct ifpga_feature *feature,
                                 struct feature_prop *prop)
{
        struct ifpga_fme_hw *fme = feature->parent;
        u16 id = GET_FIELD(PROP_ID, prop->prop_id);

        switch (id) {
        case 0x6: /* PCIE0_ERRORS */
                return fme_err_set_pcie0_errors(fme, prop->data);
        case 0x7: /* PCIE1_ERRORS */
                return fme_err_set_pcie1_errors(fme, prop->data);
        case 0xa: /* INJECT_ERRORS */
                return fme_err_set_inject_errors(fme, prop->data);
        }

        return -ENOENT;
}

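/* Dispatch a property write, mirroring fme_global_error_get_prop(). */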
static int fme_global_error_set_prop(struct ifpga_feature *feature,
                                     struct feature_prop *prop)
{
        u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
        u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);

        /* PROP_SUB is never used */
        if (sub != PROP_SUB_UNUSED)
                return -ENOENT;

        switch (top) {
        case ERR_PROP_TOP_FME_ERR:
                return fme_err_fme_err_set_prop(feature, prop);
        case ERR_PROP_TOP_UNUSED:
                return fme_err_root_set_prop(feature, prop);
        }

        return -ENOENT;
}

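/*
 * Bind the caller's eventfd to the error interrupt (the first MSI-X
 * vector of this feature).  Fails with -ENODEV when error-interrupt
 * capability was not detected at init time.
 */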
static int fme_global_err_set_irq(struct ifpga_feature *feature, void *irq_set)
{
        struct fpga_fme_err_irq_set *err_irq_set = irq_set;
        struct ifpga_fme_hw *fme;
        int ret;

        fme = (struct ifpga_fme_hw *)feature->parent;

        if (!(fme->capability & FPGA_FME_CAP_ERR_IRQ))
                return -ENODEV;

        spinlock_lock(&fme->lock);
        ret = fpga_msix_set_block(feature, 0, 1, &err_irq_set->evtfd);
        spinlock_unlock(&fme->lock);

        return ret;
}

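/* Feature ops exported to the ifpga feature framework. */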
struct ifpga_feature_ops fme_global_err_ops = {
        .init = fme_global_error_init,
        .uinit = fme_global_error_uinit,
        .get_prop = fme_global_error_get_prop,
        .set_prop = fme_global_error_set_prop,
        .set_irq = fme_global_err_set_irq,
};