/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */
5 #include "ifpga_feature_dev.h"
7 static int fme_err_get_errors(struct ifpga_fme_hw *fme, u64 *val)
9 struct feature_fme_err *fme_err
10 = get_fme_feature_ioaddr_by_index(fme,
11 FME_FEATURE_ID_GLOBAL_ERR);
12 struct feature_fme_error0 fme_error0;
14 fme_error0.csr = readq(&fme_err->fme_err);
15 *val = fme_error0.csr;
20 static int fme_err_get_first_error(struct ifpga_fme_hw *fme, u64 *val)
22 struct feature_fme_err *fme_err
23 = get_fme_feature_ioaddr_by_index(fme,
24 FME_FEATURE_ID_GLOBAL_ERR);
25 struct feature_fme_first_error fme_first_err;
27 fme_first_err.csr = readq(&fme_err->fme_first_err);
28 *val = fme_first_err.err_reg_status;
33 static int fme_err_get_next_error(struct ifpga_fme_hw *fme, u64 *val)
35 struct feature_fme_err *fme_err
36 = get_fme_feature_ioaddr_by_index(fme,
37 FME_FEATURE_ID_GLOBAL_ERR);
38 struct feature_fme_next_error fme_next_err;
40 fme_next_err.csr = readq(&fme_err->fme_next_err);
41 *val = fme_next_err.err_reg_status;
46 static int fme_err_set_clear(struct ifpga_fme_hw *fme, u64 val)
48 struct feature_fme_err *fme_err
49 = get_fme_feature_ioaddr_by_index(fme,
50 FME_FEATURE_ID_GLOBAL_ERR);
52 spinlock_lock(&fme->lock);
54 writeq(val, &fme_err->fme_err);
56 spinlock_unlock(&fme->lock);
61 static int fme_err_get_revision(struct ifpga_fme_hw *fme, u64 *val)
63 struct feature_fme_err *fme_err
64 = get_fme_feature_ioaddr_by_index(fme,
65 FME_FEATURE_ID_GLOBAL_ERR);
66 struct feature_header header;
68 header.csr = readq(&fme_err->header);
69 *val = header.revision;
74 static int fme_err_get_pcie0_errors(struct ifpga_fme_hw *fme, u64 *val)
76 struct feature_fme_err *fme_err
77 = get_fme_feature_ioaddr_by_index(fme,
78 FME_FEATURE_ID_GLOBAL_ERR);
79 struct feature_fme_pcie0_error pcie0_err;
81 pcie0_err.csr = readq(&fme_err->pcie0_err);
87 static int fme_err_set_pcie0_errors(struct ifpga_fme_hw *fme, u64 val)
89 struct feature_fme_err *fme_err
90 = get_fme_feature_ioaddr_by_index(fme,
91 FME_FEATURE_ID_GLOBAL_ERR);
92 struct feature_fme_pcie0_error pcie0_err;
95 spinlock_lock(&fme->lock);
96 writeq(FME_PCIE0_ERROR_MASK, &fme_err->pcie0_err_mask);
98 pcie0_err.csr = readq(&fme_err->pcie0_err);
99 if (val != pcie0_err.csr)
102 writeq(pcie0_err.csr & FME_PCIE0_ERROR_MASK,
103 &fme_err->pcie0_err);
105 writeq(0UL, &fme_err->pcie0_err_mask);
106 spinlock_unlock(&fme->lock);
111 static int fme_err_get_pcie1_errors(struct ifpga_fme_hw *fme, u64 *val)
113 struct feature_fme_err *fme_err
114 = get_fme_feature_ioaddr_by_index(fme,
115 FME_FEATURE_ID_GLOBAL_ERR);
116 struct feature_fme_pcie1_error pcie1_err;
118 pcie1_err.csr = readq(&fme_err->pcie1_err);
119 *val = pcie1_err.csr;
124 static int fme_err_set_pcie1_errors(struct ifpga_fme_hw *fme, u64 val)
126 struct feature_fme_err *fme_err
127 = get_fme_feature_ioaddr_by_index(fme,
128 FME_FEATURE_ID_GLOBAL_ERR);
129 struct feature_fme_pcie1_error pcie1_err;
132 spinlock_lock(&fme->lock);
133 writeq(FME_PCIE1_ERROR_MASK, &fme_err->pcie1_err_mask);
135 pcie1_err.csr = readq(&fme_err->pcie1_err);
136 if (val != pcie1_err.csr)
139 writeq(pcie1_err.csr & FME_PCIE1_ERROR_MASK,
140 &fme_err->pcie1_err);
142 writeq(0UL, &fme_err->pcie1_err_mask);
143 spinlock_unlock(&fme->lock);
148 static int fme_err_get_nonfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
150 struct feature_fme_err *fme_err
151 = get_fme_feature_ioaddr_by_index(fme,
152 FME_FEATURE_ID_GLOBAL_ERR);
153 struct feature_fme_ras_nonfaterror ras_nonfaterr;
155 ras_nonfaterr.csr = readq(&fme_err->ras_nonfaterr);
156 *val = ras_nonfaterr.csr;
161 static int fme_err_get_catfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
163 struct feature_fme_err *fme_err
164 = get_fme_feature_ioaddr_by_index(fme,
165 FME_FEATURE_ID_GLOBAL_ERR);
166 struct feature_fme_ras_catfaterror ras_catfaterr;
168 ras_catfaterr.csr = readq(&fme_err->ras_catfaterr);
169 *val = ras_catfaterr.csr;
174 static int fme_err_get_inject_errors(struct ifpga_fme_hw *fme, u64 *val)
176 struct feature_fme_err *fme_err
177 = get_fme_feature_ioaddr_by_index(fme,
178 FME_FEATURE_ID_GLOBAL_ERR);
179 struct feature_fme_ras_error_inj ras_error_inj;
181 ras_error_inj.csr = readq(&fme_err->ras_error_inj);
182 *val = ras_error_inj.csr & FME_RAS_ERROR_INJ_MASK;
187 static int fme_err_set_inject_errors(struct ifpga_fme_hw *fme, u64 val)
189 struct feature_fme_err *fme_err
190 = get_fme_feature_ioaddr_by_index(fme,
191 FME_FEATURE_ID_GLOBAL_ERR);
192 struct feature_fme_ras_error_inj ras_error_inj;
194 spinlock_lock(&fme->lock);
195 ras_error_inj.csr = readq(&fme_err->ras_error_inj);
197 if (val <= FME_RAS_ERROR_INJ_MASK) {
198 ras_error_inj.csr = val;
200 spinlock_unlock(&fme->lock);
204 writeq(ras_error_inj.csr, &fme_err->ras_error_inj);
205 spinlock_unlock(&fme->lock);
210 static void fme_error_enable(struct ifpga_fme_hw *fme)
212 struct feature_fme_err *fme_err
213 = get_fme_feature_ioaddr_by_index(fme,
214 FME_FEATURE_ID_GLOBAL_ERR);
216 writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
217 writeq(0UL, &fme_err->pcie0_err_mask);
218 writeq(0UL, &fme_err->pcie1_err_mask);
219 writeq(0UL, &fme_err->ras_nonfat_mask);
220 writeq(0UL, &fme_err->ras_catfat_mask);
223 static int fme_global_error_init(struct ifpga_feature *feature)
225 struct ifpga_fme_hw *fme = feature->parent;
227 fme_error_enable(fme);
229 if (feature->ctx_num)
230 fme->capability |= FPGA_FME_CAP_ERR_IRQ;
235 static void fme_global_error_uinit(struct ifpga_feature *feature)
240 static int fme_err_check_seu(struct feature_fme_err *fme_err)
242 struct feature_fme_error_capability error_cap;
244 error_cap.csr = readq(&fme_err->fme_err_capability);
246 return error_cap.seu_support ? 1 : 0;
249 static int fme_err_get_seu_emr(struct ifpga_fme_hw *fme,
252 struct feature_fme_err *fme_err
253 = get_fme_feature_ioaddr_by_index(fme,
254 FME_FEATURE_ID_GLOBAL_ERR);
256 if (!fme_err_check_seu(fme_err))
260 *val = readq(&fme_err->seu_emr_h);
262 *val = readq(&fme_err->seu_emr_l);
267 static int fme_err_fme_err_get_prop(struct ifpga_feature *feature,
268 struct feature_prop *prop)
270 struct ifpga_fme_hw *fme = feature->parent;
271 u16 id = GET_FIELD(PROP_ID, prop->prop_id);
274 case 0x1: /* ERRORS */
275 return fme_err_get_errors(fme, &prop->data);
276 case 0x2: /* FIRST_ERROR */
277 return fme_err_get_first_error(fme, &prop->data);
278 case 0x3: /* NEXT_ERROR */
279 return fme_err_get_next_error(fme, &prop->data);
280 case 0x5: /* SEU EMR LOW */
281 return fme_err_get_seu_emr(fme, &prop->data, 0);
282 case 0x6: /* SEU EMR HIGH */
283 return fme_err_get_seu_emr(fme, &prop->data, 1);
289 static int fme_err_root_get_prop(struct ifpga_feature *feature,
290 struct feature_prop *prop)
292 struct ifpga_fme_hw *fme = feature->parent;
293 u16 id = GET_FIELD(PROP_ID, prop->prop_id);
296 case 0x5: /* REVISION */
297 return fme_err_get_revision(fme, &prop->data);
298 case 0x6: /* PCIE0_ERRORS */
299 return fme_err_get_pcie0_errors(fme, &prop->data);
300 case 0x7: /* PCIE1_ERRORS */
301 return fme_err_get_pcie1_errors(fme, &prop->data);
302 case 0x8: /* NONFATAL_ERRORS */
303 return fme_err_get_nonfatal_errors(fme, &prop->data);
304 case 0x9: /* CATFATAL_ERRORS */
305 return fme_err_get_catfatal_errors(fme, &prop->data);
306 case 0xa: /* INJECT_ERRORS */
307 return fme_err_get_inject_errors(fme, &prop->data);
308 case 0xb: /* REVISION*/
309 return fme_err_get_revision(fme, &prop->data);
315 static int fme_global_error_get_prop(struct ifpga_feature *feature,
316 struct feature_prop *prop)
318 u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
319 u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
321 /* PROP_SUB is never used */
322 if (sub != PROP_SUB_UNUSED)
326 case ERR_PROP_TOP_FME_ERR:
327 return fme_err_fme_err_get_prop(feature, prop);
328 case ERR_PROP_TOP_UNUSED:
329 return fme_err_root_get_prop(feature, prop);
335 static int fme_err_fme_err_set_prop(struct ifpga_feature *feature,
336 struct feature_prop *prop)
338 struct ifpga_fme_hw *fme = feature->parent;
339 u16 id = GET_FIELD(PROP_ID, prop->prop_id);
342 case 0x4: /* CLEAR */
343 return fme_err_set_clear(fme, prop->data);
349 static int fme_err_root_set_prop(struct ifpga_feature *feature,
350 struct feature_prop *prop)
352 struct ifpga_fme_hw *fme = feature->parent;
353 u16 id = GET_FIELD(PROP_ID, prop->prop_id);
356 case 0x6: /* PCIE0_ERRORS */
357 return fme_err_set_pcie0_errors(fme, prop->data);
358 case 0x7: /* PCIE1_ERRORS */
359 return fme_err_set_pcie1_errors(fme, prop->data);
360 case 0xa: /* INJECT_ERRORS */
361 return fme_err_set_inject_errors(fme, prop->data);
367 static int fme_global_error_set_prop(struct ifpga_feature *feature,
368 struct feature_prop *prop)
370 u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
371 u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
373 /* PROP_SUB is never used */
374 if (sub != PROP_SUB_UNUSED)
378 case ERR_PROP_TOP_FME_ERR:
379 return fme_err_fme_err_set_prop(feature, prop);
380 case ERR_PROP_TOP_UNUSED:
381 return fme_err_root_set_prop(feature, prop);
387 static int fme_global_err_set_irq(struct ifpga_feature *feature, void *irq_set)
389 struct fpga_fme_err_irq_set *err_irq_set = irq_set;
390 struct ifpga_fme_hw *fme;
393 fme = (struct ifpga_fme_hw *)feature->parent;
395 if (!(fme->capability & FPGA_FME_CAP_ERR_IRQ))
398 spinlock_lock(&fme->lock);
399 ret = fpga_msix_set_block(feature, 0, 1, &err_irq_set->evtfd);
400 spinlock_unlock(&fme->lock);
405 struct ifpga_feature_ops fme_global_err_ops = {
406 .init = fme_global_error_init,
407 .uinit = fme_global_error_uinit,
408 .get_prop = fme_global_error_get_prop,
409 .set_prop = fme_global_error_set_prop,
410 .set_irq = fme_global_err_set_irq,