1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
5 #include "ifpga_feature_dev.h"
7 #include "opae_intel_max10.h"
9 #define PWR_THRESHOLD_MAX 0x7F
11 int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
13 struct feature *feature;
18 feature = get_fme_feature_by_id(fme, prop->feature_id);
20 if (feature && feature->ops && feature->ops->get_prop)
21 return feature->ops->get_prop(feature, prop);
26 int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
28 struct feature *feature;
33 feature = get_fme_feature_by_id(fme, prop->feature_id);
35 if (feature && feature->ops && feature->ops->set_prop)
36 return feature->ops->set_prop(feature, prop);
41 int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set)
43 struct feature *feature;
48 feature = get_fme_feature_by_id(fme, feature_id);
50 if (feature && feature->ops && feature->ops->set_irq)
51 return feature->ops->set_irq(feature, irq_set);
56 /* fme private feature head */
57 static int fme_hdr_init(struct feature *feature)
59 struct feature_fme_header *fme_hdr;
61 fme_hdr = (struct feature_fme_header *)feature->addr;
63 dev_info(NULL, "FME HDR Init.\n");
64 dev_info(NULL, "FME cap %llx.\n",
65 (unsigned long long)fme_hdr->capability.csr);
70 static void fme_hdr_uinit(struct feature *feature)
74 dev_info(NULL, "FME HDR UInit.\n");
77 static int fme_hdr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
79 struct feature_fme_header *fme_hdr
80 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
81 struct feature_header header;
83 header.csr = readq(&fme_hdr->header);
84 *revision = header.revision;
89 static int fme_hdr_get_ports_num(struct ifpga_fme_hw *fme, u64 *ports_num)
91 struct feature_fme_header *fme_hdr
92 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
93 struct feature_fme_capability fme_capability;
95 fme_capability.csr = readq(&fme_hdr->capability);
96 *ports_num = fme_capability.num_ports;
101 static int fme_hdr_get_cache_size(struct ifpga_fme_hw *fme, u64 *cache_size)
103 struct feature_fme_header *fme_hdr
104 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
105 struct feature_fme_capability fme_capability;
107 fme_capability.csr = readq(&fme_hdr->capability);
108 *cache_size = fme_capability.cache_size;
113 static int fme_hdr_get_version(struct ifpga_fme_hw *fme, u64 *version)
115 struct feature_fme_header *fme_hdr
116 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
117 struct feature_fme_capability fme_capability;
119 fme_capability.csr = readq(&fme_hdr->capability);
120 *version = fme_capability.fabric_verid;
125 static int fme_hdr_get_socket_id(struct ifpga_fme_hw *fme, u64 *socket_id)
127 struct feature_fme_header *fme_hdr
128 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
129 struct feature_fme_capability fme_capability;
131 fme_capability.csr = readq(&fme_hdr->capability);
132 *socket_id = fme_capability.socket_id;
137 static int fme_hdr_get_bitstream_id(struct ifpga_fme_hw *fme,
140 struct feature_fme_header *fme_hdr
141 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
143 *bitstream_id = readq(&fme_hdr->bitstream_id);
148 static int fme_hdr_get_bitstream_metadata(struct ifpga_fme_hw *fme,
149 u64 *bitstream_metadata)
151 struct feature_fme_header *fme_hdr
152 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
154 *bitstream_metadata = readq(&fme_hdr->bitstream_md);
160 fme_hdr_get_prop(struct feature *feature, struct feature_prop *prop)
162 struct ifpga_fme_hw *fme = feature->parent;
164 switch (prop->prop_id) {
165 case FME_HDR_PROP_REVISION:
166 return fme_hdr_get_revision(fme, &prop->data);
167 case FME_HDR_PROP_PORTS_NUM:
168 return fme_hdr_get_ports_num(fme, &prop->data);
169 case FME_HDR_PROP_CACHE_SIZE:
170 return fme_hdr_get_cache_size(fme, &prop->data);
171 case FME_HDR_PROP_VERSION:
172 return fme_hdr_get_version(fme, &prop->data);
173 case FME_HDR_PROP_SOCKET_ID:
174 return fme_hdr_get_socket_id(fme, &prop->data);
175 case FME_HDR_PROP_BITSTREAM_ID:
176 return fme_hdr_get_bitstream_id(fme, &prop->data);
177 case FME_HDR_PROP_BITSTREAM_METADATA:
178 return fme_hdr_get_bitstream_metadata(fme, &prop->data);
184 struct feature_ops fme_hdr_ops = {
185 .init = fme_hdr_init,
186 .uinit = fme_hdr_uinit,
187 .get_prop = fme_hdr_get_prop,
190 /* thermal management */
191 static int fme_thermal_get_threshold1(struct ifpga_fme_hw *fme, u64 *thres1)
193 struct feature_fme_thermal *thermal;
194 struct feature_fme_tmp_threshold temp_threshold;
196 thermal = get_fme_feature_ioaddr_by_index(fme,
197 FME_FEATURE_ID_THERMAL_MGMT);
199 temp_threshold.csr = readq(&thermal->threshold);
200 *thres1 = temp_threshold.tmp_thshold1;
205 static int fme_thermal_set_threshold1(struct ifpga_fme_hw *fme, u64 thres1)
207 struct feature_fme_thermal *thermal;
208 struct feature_fme_header *fme_hdr;
209 struct feature_fme_tmp_threshold tmp_threshold;
210 struct feature_fme_capability fme_capability;
212 thermal = get_fme_feature_ioaddr_by_index(fme,
213 FME_FEATURE_ID_THERMAL_MGMT);
214 fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
216 spinlock_lock(&fme->lock);
217 tmp_threshold.csr = readq(&thermal->threshold);
218 fme_capability.csr = readq(&fme_hdr->capability);
220 if (fme_capability.lock_bit == 1) {
221 spinlock_unlock(&fme->lock);
223 } else if (thres1 > 100) {
224 spinlock_unlock(&fme->lock);
226 } else if (thres1 == 0) {
227 tmp_threshold.tmp_thshold1_enable = 0;
228 tmp_threshold.tmp_thshold1 = thres1;
230 tmp_threshold.tmp_thshold1_enable = 1;
231 tmp_threshold.tmp_thshold1 = thres1;
234 writeq(tmp_threshold.csr, &thermal->threshold);
235 spinlock_unlock(&fme->lock);
240 static int fme_thermal_get_threshold2(struct ifpga_fme_hw *fme, u64 *thres2)
242 struct feature_fme_thermal *thermal;
243 struct feature_fme_tmp_threshold temp_threshold;
245 thermal = get_fme_feature_ioaddr_by_index(fme,
246 FME_FEATURE_ID_THERMAL_MGMT);
248 temp_threshold.csr = readq(&thermal->threshold);
249 *thres2 = temp_threshold.tmp_thshold2;
254 static int fme_thermal_set_threshold2(struct ifpga_fme_hw *fme, u64 thres2)
256 struct feature_fme_thermal *thermal;
257 struct feature_fme_header *fme_hdr;
258 struct feature_fme_tmp_threshold tmp_threshold;
259 struct feature_fme_capability fme_capability;
261 thermal = get_fme_feature_ioaddr_by_index(fme,
262 FME_FEATURE_ID_THERMAL_MGMT);
263 fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
265 spinlock_lock(&fme->lock);
266 tmp_threshold.csr = readq(&thermal->threshold);
267 fme_capability.csr = readq(&fme_hdr->capability);
269 if (fme_capability.lock_bit == 1) {
270 spinlock_unlock(&fme->lock);
272 } else if (thres2 > 100) {
273 spinlock_unlock(&fme->lock);
275 } else if (thres2 == 0) {
276 tmp_threshold.tmp_thshold2_enable = 0;
277 tmp_threshold.tmp_thshold2 = thres2;
279 tmp_threshold.tmp_thshold2_enable = 1;
280 tmp_threshold.tmp_thshold2 = thres2;
283 writeq(tmp_threshold.csr, &thermal->threshold);
284 spinlock_unlock(&fme->lock);
289 static int fme_thermal_get_threshold_trip(struct ifpga_fme_hw *fme,
292 struct feature_fme_thermal *thermal;
293 struct feature_fme_tmp_threshold temp_threshold;
295 thermal = get_fme_feature_ioaddr_by_index(fme,
296 FME_FEATURE_ID_THERMAL_MGMT);
298 temp_threshold.csr = readq(&thermal->threshold);
299 *thres_trip = temp_threshold.therm_trip_thshold;
304 static int fme_thermal_get_threshold1_reached(struct ifpga_fme_hw *fme,
307 struct feature_fme_thermal *thermal;
308 struct feature_fme_tmp_threshold temp_threshold;
310 thermal = get_fme_feature_ioaddr_by_index(fme,
311 FME_FEATURE_ID_THERMAL_MGMT);
313 temp_threshold.csr = readq(&thermal->threshold);
314 *thres1_reached = temp_threshold.thshold1_status;
319 static int fme_thermal_get_threshold2_reached(struct ifpga_fme_hw *fme,
322 struct feature_fme_thermal *thermal;
323 struct feature_fme_tmp_threshold temp_threshold;
325 thermal = get_fme_feature_ioaddr_by_index(fme,
326 FME_FEATURE_ID_THERMAL_MGMT);
328 temp_threshold.csr = readq(&thermal->threshold);
329 *thres1_reached = temp_threshold.thshold2_status;
334 static int fme_thermal_get_threshold1_policy(struct ifpga_fme_hw *fme,
337 struct feature_fme_thermal *thermal;
338 struct feature_fme_tmp_threshold temp_threshold;
340 thermal = get_fme_feature_ioaddr_by_index(fme,
341 FME_FEATURE_ID_THERMAL_MGMT);
343 temp_threshold.csr = readq(&thermal->threshold);
344 *thres1_policy = temp_threshold.thshold_policy;
349 static int fme_thermal_set_threshold1_policy(struct ifpga_fme_hw *fme,
352 struct feature_fme_thermal *thermal;
353 struct feature_fme_tmp_threshold tmp_threshold;
355 thermal = get_fme_feature_ioaddr_by_index(fme,
356 FME_FEATURE_ID_THERMAL_MGMT);
358 spinlock_lock(&fme->lock);
359 tmp_threshold.csr = readq(&thermal->threshold);
361 if (thres1_policy == 0) {
362 tmp_threshold.thshold_policy = 0;
363 } else if (thres1_policy == 1) {
364 tmp_threshold.thshold_policy = 1;
366 spinlock_unlock(&fme->lock);
370 writeq(tmp_threshold.csr, &thermal->threshold);
371 spinlock_unlock(&fme->lock);
376 static int fme_thermal_get_temperature(struct ifpga_fme_hw *fme, u64 *temp)
378 struct feature_fme_thermal *thermal;
379 struct feature_fme_temp_rdsensor_fmt1 temp_rdsensor_fmt1;
381 thermal = get_fme_feature_ioaddr_by_index(fme,
382 FME_FEATURE_ID_THERMAL_MGMT);
384 temp_rdsensor_fmt1.csr = readq(&thermal->rdsensor_fm1);
385 *temp = temp_rdsensor_fmt1.fpga_temp;
390 static int fme_thermal_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
392 struct feature_fme_thermal *fme_thermal
393 = get_fme_feature_ioaddr_by_index(fme,
394 FME_FEATURE_ID_THERMAL_MGMT);
395 struct feature_header header;
397 header.csr = readq(&fme_thermal->header);
398 *revision = header.revision;
403 #define FME_THERMAL_CAP_NO_TMP_THRESHOLD 0x1
405 static int fme_thermal_mgmt_init(struct feature *feature)
407 struct feature_fme_thermal *fme_thermal;
408 struct feature_fme_tmp_threshold_cap thermal_cap;
412 dev_info(NULL, "FME thermal mgmt Init.\n");
414 fme_thermal = (struct feature_fme_thermal *)feature->addr;
415 thermal_cap.csr = readq(&fme_thermal->threshold_cap);
417 dev_info(NULL, "FME thermal cap %llx.\n",
418 (unsigned long long)fme_thermal->threshold_cap.csr);
420 if (thermal_cap.tmp_thshold_disabled)
421 feature->cap |= FME_THERMAL_CAP_NO_TMP_THRESHOLD;
426 static void fme_thermal_mgmt_uinit(struct feature *feature)
430 dev_info(NULL, "FME thermal mgmt UInit.\n");
434 fme_thermal_set_prop(struct feature *feature, struct feature_prop *prop)
436 struct ifpga_fme_hw *fme = feature->parent;
438 if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD)
441 switch (prop->prop_id) {
442 case FME_THERMAL_PROP_THRESHOLD1:
443 return fme_thermal_set_threshold1(fme, prop->data);
444 case FME_THERMAL_PROP_THRESHOLD2:
445 return fme_thermal_set_threshold2(fme, prop->data);
446 case FME_THERMAL_PROP_THRESHOLD1_POLICY:
447 return fme_thermal_set_threshold1_policy(fme, prop->data);
454 fme_thermal_get_prop(struct feature *feature, struct feature_prop *prop)
456 struct ifpga_fme_hw *fme = feature->parent;
458 if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD &&
459 prop->prop_id != FME_THERMAL_PROP_TEMPERATURE &&
460 prop->prop_id != FME_THERMAL_PROP_REVISION)
463 switch (prop->prop_id) {
464 case FME_THERMAL_PROP_THRESHOLD1:
465 return fme_thermal_get_threshold1(fme, &prop->data);
466 case FME_THERMAL_PROP_THRESHOLD2:
467 return fme_thermal_get_threshold2(fme, &prop->data);
468 case FME_THERMAL_PROP_THRESHOLD_TRIP:
469 return fme_thermal_get_threshold_trip(fme, &prop->data);
470 case FME_THERMAL_PROP_THRESHOLD1_REACHED:
471 return fme_thermal_get_threshold1_reached(fme, &prop->data);
472 case FME_THERMAL_PROP_THRESHOLD2_REACHED:
473 return fme_thermal_get_threshold2_reached(fme, &prop->data);
474 case FME_THERMAL_PROP_THRESHOLD1_POLICY:
475 return fme_thermal_get_threshold1_policy(fme, &prop->data);
476 case FME_THERMAL_PROP_TEMPERATURE:
477 return fme_thermal_get_temperature(fme, &prop->data);
478 case FME_THERMAL_PROP_REVISION:
479 return fme_thermal_get_revision(fme, &prop->data);
485 struct feature_ops fme_thermal_mgmt_ops = {
486 .init = fme_thermal_mgmt_init,
487 .uinit = fme_thermal_mgmt_uinit,
488 .get_prop = fme_thermal_get_prop,
489 .set_prop = fme_thermal_set_prop,
492 static int fme_pwr_get_consumed(struct ifpga_fme_hw *fme, u64 *consumed)
494 struct feature_fme_power *fme_power
495 = get_fme_feature_ioaddr_by_index(fme,
496 FME_FEATURE_ID_POWER_MGMT);
497 struct feature_fme_pm_status pm_status;
499 pm_status.csr = readq(&fme_power->status);
501 *consumed = pm_status.pwr_consumed;
506 static int fme_pwr_get_threshold1(struct ifpga_fme_hw *fme, u64 *threshold)
508 struct feature_fme_power *fme_power
509 = get_fme_feature_ioaddr_by_index(fme,
510 FME_FEATURE_ID_POWER_MGMT);
511 struct feature_fme_pm_ap_threshold pm_ap_threshold;
513 pm_ap_threshold.csr = readq(&fme_power->threshold);
515 *threshold = pm_ap_threshold.threshold1;
520 static int fme_pwr_set_threshold1(struct ifpga_fme_hw *fme, u64 threshold)
522 struct feature_fme_power *fme_power
523 = get_fme_feature_ioaddr_by_index(fme,
524 FME_FEATURE_ID_POWER_MGMT);
525 struct feature_fme_pm_ap_threshold pm_ap_threshold;
527 spinlock_lock(&fme->lock);
528 pm_ap_threshold.csr = readq(&fme_power->threshold);
530 if (threshold <= PWR_THRESHOLD_MAX) {
531 pm_ap_threshold.threshold1 = threshold;
533 spinlock_unlock(&fme->lock);
537 writeq(pm_ap_threshold.csr, &fme_power->threshold);
538 spinlock_unlock(&fme->lock);
543 static int fme_pwr_get_threshold2(struct ifpga_fme_hw *fme, u64 *threshold)
545 struct feature_fme_power *fme_power
546 = get_fme_feature_ioaddr_by_index(fme,
547 FME_FEATURE_ID_POWER_MGMT);
548 struct feature_fme_pm_ap_threshold pm_ap_threshold;
550 pm_ap_threshold.csr = readq(&fme_power->threshold);
552 *threshold = pm_ap_threshold.threshold2;
557 static int fme_pwr_set_threshold2(struct ifpga_fme_hw *fme, u64 threshold)
559 struct feature_fme_power *fme_power
560 = get_fme_feature_ioaddr_by_index(fme,
561 FME_FEATURE_ID_POWER_MGMT);
562 struct feature_fme_pm_ap_threshold pm_ap_threshold;
564 spinlock_lock(&fme->lock);
565 pm_ap_threshold.csr = readq(&fme_power->threshold);
567 if (threshold <= PWR_THRESHOLD_MAX) {
568 pm_ap_threshold.threshold2 = threshold;
570 spinlock_unlock(&fme->lock);
574 writeq(pm_ap_threshold.csr, &fme_power->threshold);
575 spinlock_unlock(&fme->lock);
580 static int fme_pwr_get_threshold1_status(struct ifpga_fme_hw *fme,
581 u64 *threshold_status)
583 struct feature_fme_power *fme_power
584 = get_fme_feature_ioaddr_by_index(fme,
585 FME_FEATURE_ID_POWER_MGMT);
586 struct feature_fme_pm_ap_threshold pm_ap_threshold;
588 pm_ap_threshold.csr = readq(&fme_power->threshold);
590 *threshold_status = pm_ap_threshold.threshold1_status;
595 static int fme_pwr_get_threshold2_status(struct ifpga_fme_hw *fme,
596 u64 *threshold_status)
598 struct feature_fme_power *fme_power
599 = get_fme_feature_ioaddr_by_index(fme,
600 FME_FEATURE_ID_POWER_MGMT);
601 struct feature_fme_pm_ap_threshold pm_ap_threshold;
603 pm_ap_threshold.csr = readq(&fme_power->threshold);
605 *threshold_status = pm_ap_threshold.threshold2_status;
610 static int fme_pwr_get_rtl(struct ifpga_fme_hw *fme, u64 *rtl)
612 struct feature_fme_power *fme_power
613 = get_fme_feature_ioaddr_by_index(fme,
614 FME_FEATURE_ID_POWER_MGMT);
615 struct feature_fme_pm_status pm_status;
617 pm_status.csr = readq(&fme_power->status);
619 *rtl = pm_status.fpga_latency_report;
624 static int fme_pwr_get_xeon_limit(struct ifpga_fme_hw *fme, u64 *limit)
626 struct feature_fme_power *fme_power
627 = get_fme_feature_ioaddr_by_index(fme,
628 FME_FEATURE_ID_POWER_MGMT);
629 struct feature_fme_pm_xeon_limit xeon_limit;
631 xeon_limit.csr = readq(&fme_power->xeon_limit);
633 if (!xeon_limit.enable)
634 xeon_limit.pwr_limit = 0;
636 *limit = xeon_limit.pwr_limit;
641 static int fme_pwr_get_fpga_limit(struct ifpga_fme_hw *fme, u64 *limit)
643 struct feature_fme_power *fme_power
644 = get_fme_feature_ioaddr_by_index(fme,
645 FME_FEATURE_ID_POWER_MGMT);
646 struct feature_fme_pm_fpga_limit fpga_limit;
648 fpga_limit.csr = readq(&fme_power->fpga_limit);
650 if (!fpga_limit.enable)
651 fpga_limit.pwr_limit = 0;
653 *limit = fpga_limit.pwr_limit;
658 static int fme_pwr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
660 struct feature_fme_power *fme_power
661 = get_fme_feature_ioaddr_by_index(fme,
662 FME_FEATURE_ID_POWER_MGMT);
663 struct feature_header header;
665 header.csr = readq(&fme_power->header);
666 *revision = header.revision;
671 static int fme_power_mgmt_init(struct feature *feature)
675 dev_info(NULL, "FME power mgmt Init.\n");
680 static void fme_power_mgmt_uinit(struct feature *feature)
684 dev_info(NULL, "FME power mgmt UInit.\n");
687 static int fme_power_mgmt_get_prop(struct feature *feature,
688 struct feature_prop *prop)
690 struct ifpga_fme_hw *fme = feature->parent;
692 switch (prop->prop_id) {
693 case FME_PWR_PROP_CONSUMED:
694 return fme_pwr_get_consumed(fme, &prop->data);
695 case FME_PWR_PROP_THRESHOLD1:
696 return fme_pwr_get_threshold1(fme, &prop->data);
697 case FME_PWR_PROP_THRESHOLD2:
698 return fme_pwr_get_threshold2(fme, &prop->data);
699 case FME_PWR_PROP_THRESHOLD1_STATUS:
700 return fme_pwr_get_threshold1_status(fme, &prop->data);
701 case FME_PWR_PROP_THRESHOLD2_STATUS:
702 return fme_pwr_get_threshold2_status(fme, &prop->data);
703 case FME_PWR_PROP_RTL:
704 return fme_pwr_get_rtl(fme, &prop->data);
705 case FME_PWR_PROP_XEON_LIMIT:
706 return fme_pwr_get_xeon_limit(fme, &prop->data);
707 case FME_PWR_PROP_FPGA_LIMIT:
708 return fme_pwr_get_fpga_limit(fme, &prop->data);
709 case FME_PWR_PROP_REVISION:
710 return fme_pwr_get_revision(fme, &prop->data);
716 static int fme_power_mgmt_set_prop(struct feature *feature,
717 struct feature_prop *prop)
719 struct ifpga_fme_hw *fme = feature->parent;
721 switch (prop->prop_id) {
722 case FME_PWR_PROP_THRESHOLD1:
723 return fme_pwr_set_threshold1(fme, prop->data);
724 case FME_PWR_PROP_THRESHOLD2:
725 return fme_pwr_set_threshold2(fme, prop->data);
731 struct feature_ops fme_power_mgmt_ops = {
732 .init = fme_power_mgmt_init,
733 .uinit = fme_power_mgmt_uinit,
734 .get_prop = fme_power_mgmt_get_prop,
735 .set_prop = fme_power_mgmt_set_prop,
/* Init hook for the HSSI ethernet feature; currently a no-op. */
static int fme_hssi_eth_init(struct feature *feature)
{
	UNUSED(feature);
	return 0;
}
/* Uninit hook for the HSSI ethernet feature; currently a no-op. */
static void fme_hssi_eth_uinit(struct feature *feature)
{
	UNUSED(feature);
}
749 struct feature_ops fme_hssi_eth_ops = {
750 .init = fme_hssi_eth_init,
751 .uinit = fme_hssi_eth_uinit,
/* Init hook for the EMIF feature; currently a no-op. */
static int fme_emif_init(struct feature *feature)
{
	UNUSED(feature);
	return 0;
}
/* Uninit hook for the EMIF feature; currently a no-op. */
static void fme_emif_uinit(struct feature *feature)
{
	UNUSED(feature);
}
765 struct feature_ops fme_emif_ops = {
766 .init = fme_emif_init,
767 .uinit = fme_emif_uinit,
770 static int spi_self_checking(void)
775 ret = max10_reg_read(0x30043c, &val);
779 if (val != 0x87654321) {
780 dev_err(NULL, "Read MAX10 test register fail: 0x%x\n", val);
784 dev_info(NULL, "Read MAX10 test register success, SPI self-test done\n");
789 static int fme_spi_init(struct feature *feature)
791 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
792 struct altera_spi_device *spi_master;
793 struct intel_max10_device *max10;
796 dev_info(fme, "FME SPI Master (Max10) Init.\n");
797 dev_debug(fme, "FME SPI base addr %p.\n",
799 dev_debug(fme, "spi param=0x%llx\n",
800 (unsigned long long)opae_readq(feature->addr + 0x8));
802 spi_master = altera_spi_alloc(feature->addr, TYPE_SPI);
806 altera_spi_init(spi_master);
808 max10 = intel_max10_device_probe(spi_master, 0);
811 dev_err(fme, "max10 init fail\n");
815 fme->max10_dev = max10;
818 if (spi_self_checking()) {
826 intel_max10_device_remove(fme->max10_dev);
828 altera_spi_release(spi_master);
832 static void fme_spi_uinit(struct feature *feature)
834 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
837 intel_max10_device_remove(fme->max10_dev);
840 struct feature_ops fme_spi_master_ops = {
841 .init = fme_spi_init,
842 .uinit = fme_spi_uinit,
845 static int nios_spi_wait_init_done(struct altera_spi_device *dev)
848 unsigned long timeout = msecs_to_timer_cycles(10000);
852 if (spi_reg_read(dev, NIOS_SPI_INIT_DONE, &val))
857 ticks = rte_get_timer_cycles();
858 if (time_after(ticks, timeout))
866 static int nios_spi_check_error(struct altera_spi_device *dev)
870 if (spi_reg_read(dev, NIOS_SPI_INIT_STS0, &value))
873 dev_debug(dev, "SPI init status0 0x%x\n", value);
875 /* Error code: 0xFFF0 to 0xFFFC */
876 if (value >= 0xFFF0 && value <= 0xFFFC)
880 if (spi_reg_read(dev, NIOS_SPI_INIT_STS1, &value))
883 dev_debug(dev, "SPI init status1 0x%x\n", value);
885 /* Error code: 0xFFF0 to 0xFFFC */
886 if (value >= 0xFFF0 && value <= 0xFFFC)
892 static int fme_nios_spi_init(struct feature *feature)
894 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
895 struct altera_spi_device *spi_master;
896 struct intel_max10_device *max10;
899 dev_info(fme, "FME SPI Master (NIOS) Init.\n");
900 dev_debug(fme, "FME SPI base addr %p.\n",
902 dev_debug(fme, "spi param=0x%llx\n",
903 (unsigned long long)opae_readq(feature->addr + 0x8));
905 spi_master = altera_spi_alloc(feature->addr, TYPE_NIOS_SPI);
910 * 1. wait A10 NIOS initial finished and
911 * release the SPI master to Host
913 ret = nios_spi_wait_init_done(spi_master);
915 dev_err(fme, "FME NIOS_SPI init fail\n");
919 dev_info(fme, "FME NIOS_SPI initial done\n");
921 /* 2. check if error occur? */
922 if (nios_spi_check_error(spi_master))
923 dev_info(fme, "NIOS_SPI INIT done, but found some error\n");
925 /* 3. init the spi master*/
926 altera_spi_init(spi_master);
928 /* init the max10 device */
929 max10 = intel_max10_device_probe(spi_master, 0);
932 dev_err(fme, "max10 init fail\n");
936 fme->max10_dev = max10;
939 if (spi_self_checking())
945 intel_max10_device_remove(fme->max10_dev);
947 altera_spi_release(spi_master);
951 static void fme_nios_spi_uinit(struct feature *feature)
953 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
956 intel_max10_device_remove(fme->max10_dev);
959 struct feature_ops fme_nios_spi_master_ops = {
960 .init = fme_nios_spi_init,
961 .uinit = fme_nios_spi_uinit,