/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */
#include "ifpga_feature_dev.h"
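
/*
 * FME global error reporting feature: accessors for the FME error,
 * first/next error, PCIe error, and RAS CSRs, exposed through the
 * driver's generic property interface (see fme_global_err_ops at the
 * end of this file).
 */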
static int fme_err_get_errors(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_error0 fme_error0;

	fme_error0.csr = readq(&fme_err->fme_err);
	*val = fme_error0.csr;

	return 0;
}
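
/*
 * FME_FIRST_ERROR/FME_NEXT_ERROR appear to latch the first and second
 * error events observed by the hardware; only the err_reg_status field
 * of each CSR is reported to the caller.
 */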
static int fme_err_get_first_error(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_first_error fme_first_err;

	fme_first_err.csr = readq(&fme_err->fme_first_err);
	*val = fme_first_err.err_reg_status;

	return 0;
}
static int fme_err_get_next_error(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_next_error fme_next_err;

	fme_next_err.csr = readq(&fme_err->fme_next_err);
	*val = fme_next_err.err_reg_status;

	return 0;
}
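
/*
 * Clear the FME error state. The sequence below masks all error
 * reporting, verifies that the caller's snapshot (val) still matches
 * the live FME_ERROR0 CSR (otherwise -EBUSY), writes the latched values
 * back to clear them (write-1-to-clear semantics are assumed here), and
 * finally restores the default error mask.
 */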
static int fme_err_set_clear(struct ifpga_fme_hw *fme, u64 val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_error0 fme_error0;
	struct feature_fme_first_error fme_first_err;
	struct feature_fme_next_error fme_next_err;
	int ret = 0;

	spinlock_lock(&fme->lock);
	writeq(FME_ERROR0_MASK, &fme_err->fme_err_mask);

	fme_error0.csr = readq(&fme_err->fme_err);
	if (val != fme_error0.csr) {
		ret = -EBUSY;
		goto exit;
	}

	fme_first_err.csr = readq(&fme_err->fme_first_err);
	fme_next_err.csr = readq(&fme_err->fme_next_err);

	writeq(fme_error0.csr & FME_ERROR0_MASK, &fme_err->fme_err);
	writeq(fme_first_err.csr & FME_FIRST_ERROR_MASK,
	       &fme_err->fme_first_err);
	writeq(fme_next_err.csr & FME_NEXT_ERROR_MASK,
	       &fme_err->fme_next_err);

exit:
	writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
	spinlock_unlock(&fme->lock);

	return ret;
}
static int fme_err_get_revision(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_header header;

	header.csr = readq(&fme_err->header);
	*val = header.revision;

	return 0;
}
static int fme_err_get_pcie0_errors(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_pcie0_error pcie0_err;

	pcie0_err.csr = readq(&fme_err->pcie0_err);
	*val = pcie0_err.csr;

	return 0;
}
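
/*
 * The PCIe0/PCIe1 error setters follow the same protocol as
 * fme_err_set_clear(): mask, verify the caller's snapshot against the
 * live CSR, write back to clear, then unmask.
 */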
static int fme_err_set_pcie0_errors(struct ifpga_fme_hw *fme, u64 val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_pcie0_error pcie0_err;
	int ret = 0;

	spinlock_lock(&fme->lock);
	writeq(FME_PCIE0_ERROR_MASK, &fme_err->pcie0_err_mask);

	pcie0_err.csr = readq(&fme_err->pcie0_err);
	if (val != pcie0_err.csr)
		ret = -EBUSY;
	else
		writeq(pcie0_err.csr & FME_PCIE0_ERROR_MASK,
		       &fme_err->pcie0_err);

	writeq(0UL, &fme_err->pcie0_err_mask);
	spinlock_unlock(&fme->lock);

	return ret;
}
static int fme_err_get_pcie1_errors(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_pcie1_error pcie1_err;

	pcie1_err.csr = readq(&fme_err->pcie1_err);
	*val = pcie1_err.csr;

	return 0;
}
static int fme_err_set_pcie1_errors(struct ifpga_fme_hw *fme, u64 val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_pcie1_error pcie1_err;
	int ret = 0;

	spinlock_lock(&fme->lock);
	writeq(FME_PCIE1_ERROR_MASK, &fme_err->pcie1_err_mask);

	pcie1_err.csr = readq(&fme_err->pcie1_err);
	if (val != pcie1_err.csr)
		ret = -EBUSY;
	else
		writeq(pcie1_err.csr & FME_PCIE1_ERROR_MASK,
		       &fme_err->pcie1_err);

	writeq(0UL, &fme_err->pcie1_err_mask);
	spinlock_unlock(&fme->lock);

	return ret;
}
static int fme_err_get_nonfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_ras_nonfaterror ras_nonfaterr;

	ras_nonfaterr.csr = readq(&fme_err->ras_nonfaterr);
	*val = ras_nonfaterr.csr;

	return 0;
}
static int fme_err_get_catfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_ras_catfaterror ras_catfaterr;

	ras_catfaterr.csr = readq(&fme_err->ras_catfaterr);
	*val = ras_catfaterr.csr;

	return 0;
}
static int fme_err_get_inject_errors(struct ifpga_fme_hw *fme, u64 *val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_ras_error_inj ras_error_inj;

	ras_error_inj.csr = readq(&fme_err->ras_error_inj);
	*val = ras_error_inj.csr & FME_RAS_ERROR_INJ_MASK;

	return 0;
}
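
/*
 * Only bits covered by FME_RAS_ERROR_INJ_MASK are valid injection
 * requests; anything else is rejected with -EINVAL before touching the
 * hardware.
 */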
static int fme_err_set_inject_errors(struct ifpga_fme_hw *fme, u64 val)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_GLOBAL_ERR);
	struct feature_fme_ras_error_inj ras_error_inj;

	spinlock_lock(&fme->lock);
	ras_error_inj.csr = readq(&fme_err->ras_error_inj);

	if (val <= FME_RAS_ERROR_INJ_MASK) {
		ras_error_inj.csr = val;
	} else {
		spinlock_unlock(&fme->lock);
		return -EINVAL;
	}

	writeq(ras_error_inj.csr, &fme_err->ras_error_inj);
	spinlock_unlock(&fme->lock);

	return 0;
}
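
/*
 * Restore all error masks to their operational defaults: FME errors get
 * the default mask, PCIe and RAS errors are fully unmasked.
 */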
static void fme_error_enable(struct ifpga_fme_hw *fme)
{
	struct feature_fme_err *fme_err
		= get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_GLOBAL_ERR);

	writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
	writeq(0UL, &fme_err->pcie0_err_mask);
	writeq(0UL, &fme_err->pcie1_err_mask);
	writeq(0UL, &fme_err->ras_nonfat_mask);
	writeq(0UL, &fme_err->ras_catfat_mask);
}
static int fme_global_error_init(struct ifpga_feature *feature)
{
	struct ifpga_fme_hw *fme = feature->parent;

	fme_error_enable(fme);
	if (feature->ctx_num)
		fme->capability |= FPGA_FME_CAP_ERR_IRQ;

	return 0;
}
static void fme_global_error_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);
}
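
/* Getters for the FME_ERR property group, keyed by PROP_ID. */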
static int fme_err_fme_err_get_prop(struct ifpga_feature *feature,
				    struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	switch (id) {
	case 0x1: /* ERRORS */
		return fme_err_get_errors(fme, &prop->data);
	case 0x2: /* FIRST_ERROR */
		return fme_err_get_first_error(fme, &prop->data);
	case 0x3: /* NEXT_ERROR */
		return fme_err_get_next_error(fme, &prop->data);
	}

	return -ENOENT;
}
static int fme_err_root_get_prop(struct ifpga_feature *feature,
				 struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	switch (id) {
	case 0x5: /* REVISION */
		return fme_err_get_revision(fme, &prop->data);
	case 0x6: /* PCIE0_ERRORS */
		return fme_err_get_pcie0_errors(fme, &prop->data);
	case 0x7: /* PCIE1_ERRORS */
		return fme_err_get_pcie1_errors(fme, &prop->data);
	case 0x8: /* NONFATAL_ERRORS */
		return fme_err_get_nonfatal_errors(fme, &prop->data);
	case 0x9: /* CATFATAL_ERRORS */
		return fme_err_get_catfatal_errors(fme, &prop->data);
	case 0xa: /* INJECT_ERRORS */
		return fme_err_get_inject_errors(fme, &prop->data);
	case 0xb: /* REVISION */
		return fme_err_get_revision(fme, &prop->data);
	}

	return -ENOENT;
}
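
/*
 * Top-level dispatch: PROP_SUB is unused for this feature, so requests
 * are routed on PROP_TOP only, either to the FME_ERR group or to the
 * root (ungrouped) properties.
 */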
static int fme_global_error_get_prop(struct ifpga_feature *feature,
				     struct feature_prop *prop)
{
	u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);

	/* PROP_SUB is never used */
	if (sub != PROP_SUB_UNUSED)
		return -ENOENT;

	switch (top) {
	case ERR_PROP_TOP_FME_ERR:
		return fme_err_fme_err_get_prop(feature, prop);
	case ERR_PROP_TOP_UNUSED:
		return fme_err_root_get_prop(feature, prop);
	}

	return -ENOENT;
}
static int fme_err_fme_err_set_prop(struct ifpga_feature *feature,
				    struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	switch (id) {
	case 0x4: /* CLEAR */
		return fme_err_set_clear(fme, prop->data);
	}
	return -ENOENT;
}
static int fme_err_root_set_prop(struct ifpga_feature *feature,
				 struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	switch (id) {
	case 0x6: /* PCIE0_ERRORS */
		return fme_err_set_pcie0_errors(fme, prop->data);
	case 0x7: /* PCIE1_ERRORS */
		return fme_err_set_pcie1_errors(fme, prop->data);
	case 0xa: /* INJECT_ERRORS */
		return fme_err_set_inject_errors(fme, prop->data);
	}

	return -ENOENT;
}
static int fme_global_error_set_prop(struct ifpga_feature *feature,
				     struct feature_prop *prop)
{
	u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);

	/* PROP_SUB is never used */
	if (sub != PROP_SUB_UNUSED)
		return -ENOENT;

	switch (top) {
	case ERR_PROP_TOP_FME_ERR:
		return fme_err_fme_err_set_prop(feature, prop);
	case ERR_PROP_TOP_UNUSED:
		return fme_err_root_set_prop(feature, prop);
	}

	return -ENOENT;
}
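
/*
 * Bind the error interrupt to the caller-supplied eventfd via the MSI-X
 * block; only valid once init has detected interrupt support
 * (FPGA_FME_CAP_ERR_IRQ).
 */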
static int fme_global_err_set_irq(struct ifpga_feature *feature, void *irq_set)
{
	struct fpga_fme_err_irq_set *err_irq_set = irq_set;
	struct ifpga_fme_hw *fme;
	int ret;

	fme = (struct ifpga_fme_hw *)feature->parent;
	if (!(fme->capability & FPGA_FME_CAP_ERR_IRQ))
		return -ENODEV;

	spinlock_lock(&fme->lock);
	ret = fpga_msix_set_block(feature, 0, 1, &err_irq_set->evtfd);
	spinlock_unlock(&fme->lock);

	return ret;
}
struct ifpga_feature_ops fme_global_err_ops = {
	.init = fme_global_error_init,
	.uinit = fme_global_error_uinit,
	.get_prop = fme_global_error_get_prop,
	.set_prop = fme_global_error_set_prop,
	.set_irq = fme_global_err_set_irq,
};
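
/*
 * Usage sketch (illustrative only, not part of the driver): clearing
 * the FME error log through the property interface. FME_ERR_PROP_ERRORS
 * and FME_ERR_PROP_CLEAR are assumed to be the prop_id encodings
 * (PROP_TOP = ERR_PROP_TOP_FME_ERR, PROP_ID = 0x1/0x4) declared
 * alongside this file; the feature pointer comes from the surrounding
 * ifpga base code.
 *
 *	struct feature_prop prop;
 *
 *	prop.prop_id = FME_ERR_PROP_ERRORS;
 *	fme_global_error_get_prop(feature, &prop);	// snapshot errors
 *
 *	prop.prop_id = FME_ERR_PROP_CLEAR;
 *	// prop.data still holds the snapshot; fme_err_set_clear()
 *	// returns -EBUSY if new errors arrived after the read.
 *	fme_global_error_set_prop(feature, &prop);
 */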