1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
 */
5 #include "ifpga_feature_dev.h"
7 #include "opae_intel_max10.h"
9 #include "opae_at24_eeprom.h"
11 #define PWR_THRESHOLD_MAX 0x7F
/*
 * Generic FME property/IRQ dispatchers: look up the private feature by its
 * id and forward the call to the feature's ops callback when one exists.
 * NOTE(review): this listing is missing interior lines (the not-found
 * error return and closing braces are not visible) -- verify against the
 * complete file.
 */
13 int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
15 struct ifpga_feature *feature;
20 feature = get_fme_feature_by_id(fme, prop->feature_id);
22 if (feature && feature->ops && feature->ops->get_prop)
23 return feature->ops->get_prop(feature, prop);
/* Same dispatch pattern for setting a property. */
28 int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
30 struct ifpga_feature *feature;
35 feature = get_fme_feature_by_id(fme, prop->feature_id);
37 if (feature && feature->ops && feature->ops->set_prop)
38 return feature->ops->set_prop(feature, prop);
/* Same dispatch pattern for configuring a feature's interrupts. */
43 int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set)
45 struct ifpga_feature *feature;
50 feature = get_fme_feature_by_id(fme, feature_id);
52 if (feature && feature->ops && feature->ops->set_irq)
53 return feature->ops->set_irq(feature, irq_set);
58 /* fme private feature head */
/*
 * FME header feature: init logs the capability CSR; the getters read a
 * single field out of the header/capability registers into a u64 out-param.
 * NOTE(review): listing is missing interior lines (return statements and
 * closing braces not visible) -- verify against the complete file.
 */
59 static int fme_hdr_init(struct ifpga_feature *feature)
61 struct feature_fme_header *fme_hdr;
63 fme_hdr = (struct feature_fme_header *)feature->addr;
65 dev_info(NULL, "FME HDR Init.\n");
66 dev_info(NULL, "FME cap %llx.\n",
67 (unsigned long long)fme_hdr->capability.csr);
72 static void fme_hdr_uinit(struct ifpga_feature *feature)
76 dev_info(NULL, "FME HDR UInit.\n");
/* Revision field of the feature header CSR. */
79 static int fme_hdr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
81 struct feature_fme_header *fme_hdr
82 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
83 struct feature_header header;
85 header.csr = readq(&fme_hdr->header);
86 *revision = header.revision;
/* Number of ports advertised in the capability CSR. */
91 static int fme_hdr_get_ports_num(struct ifpga_fme_hw *fme, u64 *ports_num)
93 struct feature_fme_header *fme_hdr
94 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
95 struct feature_fme_capability fme_capability;
97 fme_capability.csr = readq(&fme_hdr->capability);
98 *ports_num = fme_capability.num_ports;
/* Cache size from the capability CSR. */
103 static int fme_hdr_get_cache_size(struct ifpga_fme_hw *fme, u64 *cache_size)
105 struct feature_fme_header *fme_hdr
106 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
107 struct feature_fme_capability fme_capability;
109 fme_capability.csr = readq(&fme_hdr->capability);
110 *cache_size = fme_capability.cache_size;
/* Fabric version id from the capability CSR. */
115 static int fme_hdr_get_version(struct ifpga_fme_hw *fme, u64 *version)
117 struct feature_fme_header *fme_hdr
118 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
119 struct feature_fme_capability fme_capability;
121 fme_capability.csr = readq(&fme_hdr->capability);
122 *version = fme_capability.fabric_verid;
/* Socket id from the capability CSR. */
127 static int fme_hdr_get_socket_id(struct ifpga_fme_hw *fme, u64 *socket_id)
129 struct feature_fme_header *fme_hdr
130 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
131 struct feature_fme_capability fme_capability;
133 fme_capability.csr = readq(&fme_hdr->capability);
134 *socket_id = fme_capability.socket_id;
/* Full 64-bit bitstream id register. */
139 static int fme_hdr_get_bitstream_id(struct ifpga_fme_hw *fme,
142 struct feature_fme_header *fme_hdr
143 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
145 *bitstream_id = readq(&fme_hdr->bitstream_id);
/* Full 64-bit bitstream metadata register. */
150 static int fme_hdr_get_bitstream_metadata(struct ifpga_fme_hw *fme,
151 u64 *bitstream_metadata)
153 struct feature_fme_header *fme_hdr
154 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
156 *bitstream_metadata = readq(&fme_hdr->bitstream_md);
/* Route a header property id to the matching getter above. */
162 fme_hdr_get_prop(struct ifpga_feature *feature, struct feature_prop *prop)
164 struct ifpga_fme_hw *fme = feature->parent;
166 switch (prop->prop_id) {
167 case FME_HDR_PROP_REVISION:
168 return fme_hdr_get_revision(fme, &prop->data);
169 case FME_HDR_PROP_PORTS_NUM:
170 return fme_hdr_get_ports_num(fme, &prop->data);
171 case FME_HDR_PROP_CACHE_SIZE:
172 return fme_hdr_get_cache_size(fme, &prop->data);
173 case FME_HDR_PROP_VERSION:
174 return fme_hdr_get_version(fme, &prop->data);
175 case FME_HDR_PROP_SOCKET_ID:
176 return fme_hdr_get_socket_id(fme, &prop->data);
177 case FME_HDR_PROP_BITSTREAM_ID:
178 return fme_hdr_get_bitstream_id(fme, &prop->data);
179 case FME_HDR_PROP_BITSTREAM_METADATA:
180 return fme_hdr_get_bitstream_metadata(fme, &prop->data);
/* ops table for the FME header private feature (read-only: no set_prop). */
186 struct ifpga_feature_ops fme_hdr_ops = {
187 .init = fme_hdr_init,
188 .uinit = fme_hdr_uinit,
189 .get_prop = fme_hdr_get_prop,
192 /* thermal management */
/*
 * Thermal threshold accessors. Getters read a field of the threshold CSR;
 * setters take fme->lock, reject writes when the capability lock_bit is set
 * or the value exceeds 100, treat 0 as "disable this threshold", then write
 * the CSR back. NOTE(review): listing is missing interior lines (error
 * return codes, else keywords, closing braces) -- verify against full file.
 */
193 static int fme_thermal_get_threshold1(struct ifpga_fme_hw *fme, u64 *thres1)
195 struct feature_fme_thermal *thermal;
196 struct feature_fme_tmp_threshold temp_threshold;
198 thermal = get_fme_feature_ioaddr_by_index(fme,
199 FME_FEATURE_ID_THERMAL_MGMT);
201 temp_threshold.csr = readq(&thermal->threshold);
202 *thres1 = temp_threshold.tmp_thshold1;
207 static int fme_thermal_set_threshold1(struct ifpga_fme_hw *fme, u64 thres1)
209 struct feature_fme_thermal *thermal;
210 struct feature_fme_header *fme_hdr;
211 struct feature_fme_tmp_threshold tmp_threshold;
212 struct feature_fme_capability fme_capability;
214 thermal = get_fme_feature_ioaddr_by_index(fme,
215 FME_FEATURE_ID_THERMAL_MGMT);
216 fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
218 spinlock_lock(&fme->lock);
219 tmp_threshold.csr = readq(&thermal->threshold);
220 fme_capability.csr = readq(&fme_hdr->capability);
/* lock_bit set: hardware forbids threshold updates. */
222 if (fme_capability.lock_bit == 1) {
223 spinlock_unlock(&fme->lock);
225 } else if (thres1 > 100) {
226 spinlock_unlock(&fme->lock);
228 } else if (thres1 == 0) {
229 tmp_threshold.tmp_thshold1_enable = 0;
230 tmp_threshold.tmp_thshold1 = thres1;
232 tmp_threshold.tmp_thshold1_enable = 1;
233 tmp_threshold.tmp_thshold1 = thres1;
236 writeq(tmp_threshold.csr, &thermal->threshold);
237 spinlock_unlock(&fme->lock);
242 static int fme_thermal_get_threshold2(struct ifpga_fme_hw *fme, u64 *thres2)
244 struct feature_fme_thermal *thermal;
245 struct feature_fme_tmp_threshold temp_threshold;
247 thermal = get_fme_feature_ioaddr_by_index(fme,
248 FME_FEATURE_ID_THERMAL_MGMT);
250 temp_threshold.csr = readq(&thermal->threshold);
251 *thres2 = temp_threshold.tmp_thshold2;
/* Mirror of set_threshold1 for the second threshold field. */
256 static int fme_thermal_set_threshold2(struct ifpga_fme_hw *fme, u64 thres2)
258 struct feature_fme_thermal *thermal;
259 struct feature_fme_header *fme_hdr;
260 struct feature_fme_tmp_threshold tmp_threshold;
261 struct feature_fme_capability fme_capability;
263 thermal = get_fme_feature_ioaddr_by_index(fme,
264 FME_FEATURE_ID_THERMAL_MGMT);
265 fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
267 spinlock_lock(&fme->lock);
268 tmp_threshold.csr = readq(&thermal->threshold);
269 fme_capability.csr = readq(&fme_hdr->capability);
271 if (fme_capability.lock_bit == 1) {
272 spinlock_unlock(&fme->lock);
274 } else if (thres2 > 100) {
275 spinlock_unlock(&fme->lock);
277 } else if (thres2 == 0) {
278 tmp_threshold.tmp_thshold2_enable = 0;
279 tmp_threshold.tmp_thshold2 = thres2;
281 tmp_threshold.tmp_thshold2_enable = 1;
282 tmp_threshold.tmp_thshold2 = thres2;
285 writeq(tmp_threshold.csr, &thermal->threshold);
286 spinlock_unlock(&fme->lock);
/*
 * Thermal status getters: each reads one field of the threshold (or sensor)
 * CSR into a u64 out-param. NOTE(review): listing is missing interior lines
 * (some out-param parameter declarations, returns, braces are not visible).
 */
291 static int fme_thermal_get_threshold_trip(struct ifpga_fme_hw *fme,
294 struct feature_fme_thermal *thermal;
295 struct feature_fme_tmp_threshold temp_threshold;
297 thermal = get_fme_feature_ioaddr_by_index(fme,
298 FME_FEATURE_ID_THERMAL_MGMT);
300 temp_threshold.csr = readq(&thermal->threshold);
301 *thres_trip = temp_threshold.therm_trip_thshold;
306 static int fme_thermal_get_threshold1_reached(struct ifpga_fme_hw *fme,
309 struct feature_fme_thermal *thermal;
310 struct feature_fme_tmp_threshold temp_threshold;
312 thermal = get_fme_feature_ioaddr_by_index(fme,
313 FME_FEATURE_ID_THERMAL_MGMT);
315 temp_threshold.csr = readq(&thermal->threshold);
316 *thres1_reached = temp_threshold.thshold1_status;
321 static int fme_thermal_get_threshold2_reached(struct ifpga_fme_hw *fme,
324 struct feature_fme_thermal *thermal;
325 struct feature_fme_tmp_threshold temp_threshold;
327 thermal = get_fme_feature_ioaddr_by_index(fme,
328 FME_FEATURE_ID_THERMAL_MGMT);
330 temp_threshold.csr = readq(&thermal->threshold);
/* NOTE(review): out-param is named thres1_reached but carries the
 * threshold2 status bit -- looks like a copy-paste parameter name. */
331 *thres1_reached = temp_threshold.thshold2_status;
336 static int fme_thermal_get_threshold1_policy(struct ifpga_fme_hw *fme,
339 struct feature_fme_thermal *thermal;
340 struct feature_fme_tmp_threshold temp_threshold;
342 thermal = get_fme_feature_ioaddr_by_index(fme,
343 FME_FEATURE_ID_THERMAL_MGMT);
345 temp_threshold.csr = readq(&thermal->threshold);
346 *thres1_policy = temp_threshold.thshold_policy;
/* Policy setter: only 0 and 1 are accepted; anything else unlocks and
 * (presumably) returns an error -- the error path is not visible here. */
351 static int fme_thermal_set_threshold1_policy(struct ifpga_fme_hw *fme,
354 struct feature_fme_thermal *thermal;
355 struct feature_fme_tmp_threshold tmp_threshold;
357 thermal = get_fme_feature_ioaddr_by_index(fme,
358 FME_FEATURE_ID_THERMAL_MGMT);
360 spinlock_lock(&fme->lock);
361 tmp_threshold.csr = readq(&thermal->threshold);
363 if (thres1_policy == 0) {
364 tmp_threshold.thshold_policy = 0;
365 } else if (thres1_policy == 1) {
366 tmp_threshold.thshold_policy = 1;
368 spinlock_unlock(&fme->lock);
372 writeq(tmp_threshold.csr, &thermal->threshold);
373 spinlock_unlock(&fme->lock);
/* Current FPGA temperature from sensor-format-1 CSR. */
378 static int fme_thermal_get_temperature(struct ifpga_fme_hw *fme, u64 *temp)
380 struct feature_fme_thermal *thermal;
381 struct feature_fme_temp_rdsensor_fmt1 temp_rdsensor_fmt1;
383 thermal = get_fme_feature_ioaddr_by_index(fme,
384 FME_FEATURE_ID_THERMAL_MGMT);
386 temp_rdsensor_fmt1.csr = readq(&thermal->rdsensor_fm1);
387 *temp = temp_rdsensor_fmt1.fpga_temp;
/* Revision field of the thermal feature's header CSR. */
392 static int fme_thermal_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
394 struct feature_fme_thermal *fme_thermal
395 = get_fme_feature_ioaddr_by_index(fme,
396 FME_FEATURE_ID_THERMAL_MGMT);
397 struct feature_header header;
399 header.csr = readq(&fme_thermal->header);
400 *revision = header.revision;
/* Capability flag: hardware has no temperature-threshold support. */
405 #define FME_THERMAL_CAP_NO_TMP_THRESHOLD 0x1
/*
 * Thermal feature init: latch the no-threshold capability bit into
 * feature->cap so the prop handlers can refuse threshold access.
 * NOTE(review): listing is missing interior lines (returns/braces).
 */
407 static int fme_thermal_mgmt_init(struct ifpga_feature *feature)
409 struct feature_fme_thermal *fme_thermal;
410 struct feature_fme_tmp_threshold_cap thermal_cap;
414 dev_info(NULL, "FME thermal mgmt Init.\n");
416 fme_thermal = (struct feature_fme_thermal *)feature->addr;
417 thermal_cap.csr = readq(&fme_thermal->threshold_cap);
419 dev_info(NULL, "FME thermal cap %llx.\n",
420 (unsigned long long)fme_thermal->threshold_cap.csr);
422 if (thermal_cap.tmp_thshold_disabled)
423 feature->cap |= FME_THERMAL_CAP_NO_TMP_THRESHOLD;
428 static void fme_thermal_mgmt_uinit(struct ifpga_feature *feature)
432 dev_info(NULL, "FME thermal mgmt UInit.\n");
/* set_prop: refuse entirely when thresholds are disabled in hardware. */
436 fme_thermal_set_prop(struct ifpga_feature *feature, struct feature_prop *prop)
438 struct ifpga_fme_hw *fme = feature->parent;
440 if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD)
443 switch (prop->prop_id) {
444 case FME_THERMAL_PROP_THRESHOLD1:
445 return fme_thermal_set_threshold1(fme, prop->data);
446 case FME_THERMAL_PROP_THRESHOLD2:
447 return fme_thermal_set_threshold2(fme, prop->data);
448 case FME_THERMAL_PROP_THRESHOLD1_POLICY:
449 return fme_thermal_set_threshold1_policy(fme, prop->data);
/* get_prop: when thresholds are disabled, only temperature and revision
 * remain readable. */
456 fme_thermal_get_prop(struct ifpga_feature *feature, struct feature_prop *prop)
458 struct ifpga_fme_hw *fme = feature->parent;
460 if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD &&
461 prop->prop_id != FME_THERMAL_PROP_TEMPERATURE &&
462 prop->prop_id != FME_THERMAL_PROP_REVISION)
465 switch (prop->prop_id) {
466 case FME_THERMAL_PROP_THRESHOLD1:
467 return fme_thermal_get_threshold1(fme, &prop->data);
468 case FME_THERMAL_PROP_THRESHOLD2:
469 return fme_thermal_get_threshold2(fme, &prop->data);
470 case FME_THERMAL_PROP_THRESHOLD_TRIP:
471 return fme_thermal_get_threshold_trip(fme, &prop->data);
472 case FME_THERMAL_PROP_THRESHOLD1_REACHED:
473 return fme_thermal_get_threshold1_reached(fme, &prop->data);
474 case FME_THERMAL_PROP_THRESHOLD2_REACHED:
475 return fme_thermal_get_threshold2_reached(fme, &prop->data);
476 case FME_THERMAL_PROP_THRESHOLD1_POLICY:
477 return fme_thermal_get_threshold1_policy(fme, &prop->data);
478 case FME_THERMAL_PROP_TEMPERATURE:
479 return fme_thermal_get_temperature(fme, &prop->data);
480 case FME_THERMAL_PROP_REVISION:
481 return fme_thermal_get_revision(fme, &prop->data);
/* ops table for the thermal management private feature. */
487 struct ifpga_feature_ops fme_thermal_mgmt_ops = {
488 .init = fme_thermal_mgmt_init,
489 .uinit = fme_thermal_mgmt_uinit,
490 .get_prop = fme_thermal_get_prop,
491 .set_prop = fme_thermal_set_prop,
/*
 * Power management accessors. Getters read fields out of the status /
 * AP-threshold CSRs; setters take fme->lock, bound the value by
 * PWR_THRESHOLD_MAX, and write the CSR back. NOTE(review): listing is
 * missing interior lines (else branches, returns, braces not visible).
 */
494 static int fme_pwr_get_consumed(struct ifpga_fme_hw *fme, u64 *consumed)
496 struct feature_fme_power *fme_power
497 = get_fme_feature_ioaddr_by_index(fme,
498 FME_FEATURE_ID_POWER_MGMT);
499 struct feature_fme_pm_status pm_status;
501 pm_status.csr = readq(&fme_power->status);
503 *consumed = pm_status.pwr_consumed;
508 static int fme_pwr_get_threshold1(struct ifpga_fme_hw *fme, u64 *threshold)
510 struct feature_fme_power *fme_power
511 = get_fme_feature_ioaddr_by_index(fme,
512 FME_FEATURE_ID_POWER_MGMT);
513 struct feature_fme_pm_ap_threshold pm_ap_threshold;
515 pm_ap_threshold.csr = readq(&fme_power->threshold);
517 *threshold = pm_ap_threshold.threshold1;
522 static int fme_pwr_set_threshold1(struct ifpga_fme_hw *fme, u64 threshold)
524 struct feature_fme_power *fme_power
525 = get_fme_feature_ioaddr_by_index(fme,
526 FME_FEATURE_ID_POWER_MGMT);
527 struct feature_fme_pm_ap_threshold pm_ap_threshold;
529 spinlock_lock(&fme->lock);
530 pm_ap_threshold.csr = readq(&fme_power->threshold);
/* Only accept values within the 7-bit hardware field (PWR_THRESHOLD_MAX). */
532 if (threshold <= PWR_THRESHOLD_MAX) {
533 pm_ap_threshold.threshold1 = threshold;
535 spinlock_unlock(&fme->lock);
539 writeq(pm_ap_threshold.csr, &fme_power->threshold);
540 spinlock_unlock(&fme->lock);
545 static int fme_pwr_get_threshold2(struct ifpga_fme_hw *fme, u64 *threshold)
547 struct feature_fme_power *fme_power
548 = get_fme_feature_ioaddr_by_index(fme,
549 FME_FEATURE_ID_POWER_MGMT);
550 struct feature_fme_pm_ap_threshold pm_ap_threshold;
552 pm_ap_threshold.csr = readq(&fme_power->threshold);
554 *threshold = pm_ap_threshold.threshold2;
/* Mirror of set_threshold1 for the second AP threshold field. */
559 static int fme_pwr_set_threshold2(struct ifpga_fme_hw *fme, u64 threshold)
561 struct feature_fme_power *fme_power
562 = get_fme_feature_ioaddr_by_index(fme,
563 FME_FEATURE_ID_POWER_MGMT);
564 struct feature_fme_pm_ap_threshold pm_ap_threshold;
566 spinlock_lock(&fme->lock);
567 pm_ap_threshold.csr = readq(&fme_power->threshold);
569 if (threshold <= PWR_THRESHOLD_MAX) {
570 pm_ap_threshold.threshold2 = threshold;
572 spinlock_unlock(&fme->lock);
576 writeq(pm_ap_threshold.csr, &fme_power->threshold);
577 spinlock_unlock(&fme->lock);
/*
 * Power status/limit getters: single-field CSR reads into u64 out-params.
 * NOTE(review): listing is missing interior lines (returns/braces).
 */
582 static int fme_pwr_get_threshold1_status(struct ifpga_fme_hw *fme,
583 u64 *threshold_status)
585 struct feature_fme_power *fme_power
586 = get_fme_feature_ioaddr_by_index(fme,
587 FME_FEATURE_ID_POWER_MGMT);
588 struct feature_fme_pm_ap_threshold pm_ap_threshold;
590 pm_ap_threshold.csr = readq(&fme_power->threshold);
592 *threshold_status = pm_ap_threshold.threshold1_status;
597 static int fme_pwr_get_threshold2_status(struct ifpga_fme_hw *fme,
598 u64 *threshold_status)
600 struct feature_fme_power *fme_power
601 = get_fme_feature_ioaddr_by_index(fme,
602 FME_FEATURE_ID_POWER_MGMT);
603 struct feature_fme_pm_ap_threshold pm_ap_threshold;
605 pm_ap_threshold.csr = readq(&fme_power->threshold);
607 *threshold_status = pm_ap_threshold.threshold2_status;
/* FPGA latency report ("rtl") bit of the PM status CSR. */
612 static int fme_pwr_get_rtl(struct ifpga_fme_hw *fme, u64 *rtl)
614 struct feature_fme_power *fme_power
615 = get_fme_feature_ioaddr_by_index(fme,
616 FME_FEATURE_ID_POWER_MGMT);
617 struct feature_fme_pm_status pm_status;
619 pm_status.csr = readq(&fme_power->status);
621 *rtl = pm_status.fpga_latency_report;
/* Xeon power limit; reports 0 when the limit is not enabled. */
626 static int fme_pwr_get_xeon_limit(struct ifpga_fme_hw *fme, u64 *limit)
628 struct feature_fme_power *fme_power
629 = get_fme_feature_ioaddr_by_index(fme,
630 FME_FEATURE_ID_POWER_MGMT);
631 struct feature_fme_pm_xeon_limit xeon_limit;
633 xeon_limit.csr = readq(&fme_power->xeon_limit);
635 if (!xeon_limit.enable)
636 xeon_limit.pwr_limit = 0;
638 *limit = xeon_limit.pwr_limit;
/* FPGA power limit; reports 0 when the limit is not enabled. */
643 static int fme_pwr_get_fpga_limit(struct ifpga_fme_hw *fme, u64 *limit)
645 struct feature_fme_power *fme_power
646 = get_fme_feature_ioaddr_by_index(fme,
647 FME_FEATURE_ID_POWER_MGMT);
648 struct feature_fme_pm_fpga_limit fpga_limit;
650 fpga_limit.csr = readq(&fme_power->fpga_limit);
652 if (!fpga_limit.enable)
653 fpga_limit.pwr_limit = 0;
655 *limit = fpga_limit.pwr_limit;
/* Revision field of the power feature's header CSR. */
660 static int fme_pwr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
662 struct feature_fme_power *fme_power
663 = get_fme_feature_ioaddr_by_index(fme,
664 FME_FEATURE_ID_POWER_MGMT);
665 struct feature_header header;
667 header.csr = readq(&fme_power->header);
668 *revision = header.revision;
/*
 * Power management feature lifecycle and prop dispatch.
 * NOTE(review): listing is missing interior lines (returns/braces and the
 * default/unknown-prop paths are not visible).
 */
673 static int fme_power_mgmt_init(struct ifpga_feature *feature)
677 dev_info(NULL, "FME power mgmt Init.\n");
682 static void fme_power_mgmt_uinit(struct ifpga_feature *feature)
686 dev_info(NULL, "FME power mgmt UInit.\n");
/* Route a power property id to the matching getter. */
689 static int fme_power_mgmt_get_prop(struct ifpga_feature *feature,
690 struct feature_prop *prop)
692 struct ifpga_fme_hw *fme = feature->parent;
694 switch (prop->prop_id) {
695 case FME_PWR_PROP_CONSUMED:
696 return fme_pwr_get_consumed(fme, &prop->data);
697 case FME_PWR_PROP_THRESHOLD1:
698 return fme_pwr_get_threshold1(fme, &prop->data);
699 case FME_PWR_PROP_THRESHOLD2:
700 return fme_pwr_get_threshold2(fme, &prop->data);
701 case FME_PWR_PROP_THRESHOLD1_STATUS:
702 return fme_pwr_get_threshold1_status(fme, &prop->data);
703 case FME_PWR_PROP_THRESHOLD2_STATUS:
704 return fme_pwr_get_threshold2_status(fme, &prop->data);
705 case FME_PWR_PROP_RTL:
706 return fme_pwr_get_rtl(fme, &prop->data);
707 case FME_PWR_PROP_XEON_LIMIT:
708 return fme_pwr_get_xeon_limit(fme, &prop->data);
709 case FME_PWR_PROP_FPGA_LIMIT:
710 return fme_pwr_get_fpga_limit(fme, &prop->data);
711 case FME_PWR_PROP_REVISION:
712 return fme_pwr_get_revision(fme, &prop->data);
/* Only the two AP thresholds are writable. */
718 static int fme_power_mgmt_set_prop(struct ifpga_feature *feature,
719 struct feature_prop *prop)
721 struct ifpga_fme_hw *fme = feature->parent;
723 switch (prop->prop_id) {
724 case FME_PWR_PROP_THRESHOLD1:
725 return fme_pwr_set_threshold1(fme, prop->data);
726 case FME_PWR_PROP_THRESHOLD2:
727 return fme_pwr_set_threshold2(fme, prop->data);
/* ops table for the power management private feature. */
733 struct ifpga_feature_ops fme_power_mgmt_ops = {
734 .init = fme_power_mgmt_init,
735 .uinit = fme_power_mgmt_uinit,
736 .get_prop = fme_power_mgmt_get_prop,
737 .set_prop = fme_power_mgmt_set_prop,
/*
 * HSSI-eth and EMIF private features: lifecycle stubs only (no prop ops).
 * NOTE(review): bodies are not visible in this listing.
 */
740 static int fme_hssi_eth_init(struct ifpga_feature *feature)
746 static void fme_hssi_eth_uinit(struct ifpga_feature *feature)
751 struct ifpga_feature_ops fme_hssi_eth_ops = {
752 .init = fme_hssi_eth_init,
753 .uinit = fme_hssi_eth_uinit,
756 static int fme_emif_init(struct ifpga_feature *feature)
762 static void fme_emif_uinit(struct ifpga_feature *feature)
767 struct ifpga_feature_ops fme_emif_ops = {
768 .init = fme_emif_init,
769 .uinit = fme_emif_uinit,
/*
 * Map a board type id to a printable name / to retimer+FVL port topology.
 * NOTE(review): the case labels for each board type are missing from this
 * listing -- only the assignment bodies are visible; verify the mapping
 * against the complete file before relying on it.
 */
772 static const char *board_type_to_string(u32 type)
782 return "VC_4x25G+2x25G";
790 static int board_type_to_info(u32 type,
791 struct ifpga_fme_board_info *info)
795 info->nums_of_retimer = 2;
796 info->ports_per_retimer = 4;
797 info->nums_of_fvl = 2;
798 info->ports_per_fvl = 4;
801 info->nums_of_retimer = 1;
802 info->ports_per_retimer = 4;
803 info->nums_of_fvl = 2;
804 info->ports_per_fvl = 2;
807 info->nums_of_retimer = 2;
808 info->ports_per_retimer = 1;
809 info->nums_of_fvl = 1;
810 info->ports_per_fvl = 2;
813 info->nums_of_retimer = 2;
814 info->ports_per_retimer = 2;
815 info->nums_of_fvl = 2;
816 info->ports_per_fvl = 2;
/*
 * Decode the bitstream id into board_info (type, hash, versions), fill in
 * the port topology, then read MAX10/NIOS firmware versions over SPI.
 * NOTE(review): error-return lines after each failing call are missing
 * from this listing.
 */
825 static int fme_get_board_interface(struct ifpga_fme_hw *fme)
827 struct fme_bitstream_id id;
830 if (fme_hdr_get_bitstream_id(fme, &id.id))
833 fme->board_info.type = id.interface;
834 fme->board_info.build_hash = id.hash;
835 fme->board_info.debug_version = id.debug;
836 fme->board_info.major_version = id.major;
837 fme->board_info.minor_version = id.minor;
839 dev_info(fme, "board type: %s major_version:%u minor_version:%u build_hash:%u\n",
840 board_type_to_string(fme->board_info.type),
841 fme->board_info.major_version,
842 fme->board_info.minor_version,
843 fme->board_info.build_hash);
845 if (board_type_to_info(fme->board_info.type, &fme->board_info))
848 dev_info(fme, "get board info: nums_retimers %d ports_per_retimer %d nums_fvl %d ports_per_fvl %d\n",
849 fme->board_info.nums_of_retimer,
850 fme->board_info.ports_per_retimer,
851 fme->board_info.nums_of_fvl,
852 fme->board_info.ports_per_fvl);
/* Versions are the low 24 bits of the MAX10 build / NIOS fw registers. */
854 if (max10_sys_read(MAX10_BUILD_VER, &val))
856 fme->board_info.max10_version = val & 0xffffff;
858 if (max10_sys_read(NIOS2_FW_VERSION, &val))
860 fme->board_info.nios_fw_version = val & 0xffffff;
862 dev_info(fme, "max10 version 0x%x, nios fw version 0x%x\n",
863 fme->board_info.max10_version,
864 fme->board_info.nios_fw_version);
/* Sanity-check the SPI link by reading the MAX10 test register. */
869 static int spi_self_checking(void)
874 ret = max10_sys_read(MAX10_TEST_REG, &val);
878 dev_info(NULL, "Read MAX10 test register 0x%x\n", val);
/*
 * SPI-master feature init: allocate/init an Altera SPI master over the
 * feature MMIO, probe the MAX10 BMC behind it, then self-check the link.
 * On self-check failure the MAX10 device and SPI master are torn down
 * (cleanup labels are not visible in this listing).
 */
883 static int fme_spi_init(struct ifpga_feature *feature)
885 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
886 struct altera_spi_device *spi_master;
887 struct intel_max10_device *max10;
890 dev_info(fme, "FME SPI Master (Max10) Init.\n");
891 dev_debug(fme, "FME SPI base addr %p.\n",
893 dev_debug(fme, "spi param=0x%llx\n",
894 (unsigned long long)opae_readq(feature->addr + 0x8));
896 spi_master = altera_spi_alloc(feature->addr, TYPE_SPI);
900 altera_spi_init(spi_master);
902 max10 = intel_max10_device_probe(spi_master, 0);
905 dev_err(fme, "max10 init fail\n");
909 fme->max10_dev = max10;
912 if (spi_self_checking()) {
/* Error unwinding: remove MAX10 device, release SPI master. */
920 intel_max10_device_remove(fme->max10_dev);
922 altera_spi_release(spi_master);
/* Teardown mirrors init: only the MAX10 device is removed here. */
926 static void fme_spi_uinit(struct ifpga_feature *feature)
928 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
931 intel_max10_device_remove(fme->max10_dev);
934 struct ifpga_feature_ops fme_spi_master_ops = {
935 .init = fme_spi_init,
936 .uinit = fme_spi_uinit,
/*
 * Poll the NIOS_SPI_INIT_DONE register until the NIOS firmware releases the
 * SPI master, with a 10s timeout measured in timer cycles.
 * NOTE(review): loop structure and return values are partly missing from
 * this listing.
 */
939 static int nios_spi_wait_init_done(struct altera_spi_device *dev)
942 unsigned long timeout = msecs_to_timer_cycles(10000);
946 if (spi_reg_read(dev, NIOS_SPI_INIT_DONE, &val))
951 ticks = rte_get_timer_cycles();
952 if (time_after(ticks, timeout))
/*
 * Read both NIOS init status registers and report whether either carries a
 * firmware error code (0xFFF0..0xFFFC).
 */
960 static int nios_spi_check_error(struct altera_spi_device *dev)
964 if (spi_reg_read(dev, NIOS_SPI_INIT_STS0, &value))
967 dev_debug(dev, "SPI init status0 0x%x\n", value);
969 /* Error code: 0xFFF0 to 0xFFFC */
970 if (value >= 0xFFF0 && value <= 0xFFFC)
974 if (spi_reg_read(dev, NIOS_SPI_INIT_STS1, &value))
977 dev_debug(dev, "SPI init status1 0x%x\n", value);
979 /* Error code: 0xFFF0 to 0xFFFC */
980 if (value >= 0xFFF0 && value <= 0xFFFC)
/*
 * NIOS-owned SPI master init: wait for the NIOS firmware to hand over the
 * SPI master, check its init status, then init the master, probe the MAX10
 * BMC, read board info, and self-check the link. Error unwinding removes
 * the MAX10 device and releases the SPI master (labels not visible here).
 */
986 static int fme_nios_spi_init(struct ifpga_feature *feature)
988 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
989 struct altera_spi_device *spi_master;
990 struct intel_max10_device *max10;
993 dev_info(fme, "FME SPI Master (NIOS) Init.\n");
994 dev_debug(fme, "FME SPI base addr %p.\n",
996 dev_debug(fme, "spi param=0x%llx\n",
997 (unsigned long long)opae_readq(feature->addr + 0x8));
999 spi_master = altera_spi_alloc(feature->addr, TYPE_NIOS_SPI);
1004 * 1. wait A10 NIOS initial finished and
1005 * release the SPI master to Host
1007 ret = nios_spi_wait_init_done(spi_master);
1009 dev_err(fme, "FME NIOS_SPI init fail\n");
1013 dev_info(fme, "FME NIOS_SPI initial done\n");
1015 /* 2. check if error occur? */
1016 if (nios_spi_check_error(spi_master))
1017 dev_info(fme, "NIOS_SPI INIT done, but found some error\n");
1019 /* 3. init the spi master*/
1020 altera_spi_init(spi_master);
1022 /* init the max10 device */
1023 max10 = intel_max10_device_probe(spi_master, 0);
1026 dev_err(fme, "max10 init fail\n");
1030 fme_get_board_interface(fme);
1032 fme->max10_dev = max10;
1035 if (spi_self_checking())
/* Error unwinding. */
1041 intel_max10_device_remove(fme->max10_dev);
1043 altera_spi_release(spi_master);
1047 static void fme_nios_spi_uinit(struct ifpga_feature *feature)
1049 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1052 intel_max10_device_remove(fme->max10_dev);
1055 struct ifpga_feature_ops fme_nios_spi_master_ops = {
1056 .init = fme_nios_spi_init,
1057 .uinit = fme_nios_spi_uinit,
/*
 * MAC-ROM self test: write a fixed pattern to the AT24 EEPROM at offset 0,
 * read it back, and compare. NOTE(review): this write is destructive to
 * the first bytes of the EEPROM; also the buf declaration and return lines
 * are missing from this listing.
 */
1060 static int i2c_mac_rom_test(struct altera_i2c_dev *dev)
1064 char read_buf[20] = {0,};
1065 const char *string = "1a2b3c4d5e";
1067 opae_memcpy(buf, string, strlen(string));
1069 ret = at24_eeprom_write(dev, AT24512_SLAVE_ADDR, 0,
1070 (u8 *)buf, strlen(string));
1072 dev_err(NULL, "write i2c error:%d\n", ret);
1076 ret = at24_eeprom_read(dev, AT24512_SLAVE_ADDR, 0,
1077 (u8 *)read_buf, strlen(string));
1079 dev_err(NULL, "read i2c error:%d\n", ret);
1083 if (memcmp(buf, read_buf, strlen(string))) {
1084 dev_err(NULL, "%s test fail!\n", __func__);
1088 dev_info(NULL, "%s test successful\n", __func__);
/* I2C master feature: probe the Altera I2C controller and self-test. */
1093 static int fme_i2c_init(struct ifpga_feature *feature)
1095 struct feature_fme_i2c *i2c;
1096 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1098 i2c = (struct feature_fme_i2c *)feature->addr;
1100 dev_info(NULL, "FME I2C Master Init.\n");
1102 fme->i2c_master = altera_i2c_probe(i2c);
1103 if (!fme->i2c_master)
1106 /* MAC ROM self test */
1107 i2c_mac_rom_test(fme->i2c_master);
1112 static void fme_i2c_uninit(struct ifpga_feature *feature)
1114 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1116 altera_i2c_remove(fme->i2c_master);
1119 struct ifpga_feature_ops fme_i2c_master_ops = {
1120 .init = fme_i2c_init,
1121 .uinit = fme_i2c_uninit,
/*
 * Eth-group feature init: probe the eth group device at the feature MMIO,
 * register it (and its MMIO region) under its group id, and bump the
 * device count. NOTE(review): region source expressions and returns are
 * missing from this listing.
 */
1124 static int fme_eth_group_init(struct ifpga_feature *feature)
1126 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1127 struct eth_group_device *dev;
1129 dev = (struct eth_group_device *)eth_group_probe(feature->addr);
1133 fme->eth_dev[dev->group_id] = dev;
1135 fme->eth_group_region[dev->group_id].addr =
1137 fme->eth_group_region[dev->group_id].phys_addr =
1139 fme->eth_group_region[dev->group_id].len =
1142 fme->nums_eth_dev++;
1144 dev_info(NULL, "FME PHY Group %d Init.\n", dev->group_id);
1145 dev_info(NULL, "found %d eth group, addr %p phys_addr 0x%llx len %u\n",
1146 dev->group_id, feature->addr,
1147 (unsigned long long)feature->phys_addr,
1153 static void fme_eth_group_uinit(struct ifpga_feature *feature)
1158 struct ifpga_feature_ops fme_eth_group_ops = {
1159 .init = fme_eth_group_init,
1160 .uinit = fme_eth_group_uinit,
/*
 * MAC-ROM accessors: thin wrappers over AT24 EEPROM read/write using the
 * FME's I2C master. NOTE(review): the null-check on fme->i2c_master is not
 * visible in this listing -- presumably between assignment and use.
 */
1163 int fme_mgr_read_mac_rom(struct ifpga_fme_hw *fme, int offset,
1164 void *buf, int size)
1166 struct altera_i2c_dev *dev;
1168 dev = fme->i2c_master;
1172 return at24_eeprom_read(dev, AT24512_SLAVE_ADDR, offset, buf, size);
1175 int fme_mgr_write_mac_rom(struct ifpga_fme_hw *fme, int offset,
1176 void *buf, int size)
1178 struct altera_i2c_dev *dev;
1180 dev = fme->i2c_master;
1184 return at24_eeprom_write(dev, AT24512_SLAVE_ADDR, offset, buf, size);
/*
 * Bounds-check the group id and return its attached eth group device, or
 * (presumably) NULL when missing or not attached -- returns not visible.
 */
1187 static struct eth_group_device *get_eth_group_dev(struct ifpga_fme_hw *fme,
1190 struct eth_group_device *dev;
1192 if (group_id > (MAX_ETH_GROUP_DEVICES - 1))
1195 dev = (struct eth_group_device *)fme->eth_dev[group_id];
1199 if (dev->status != ETH_GROUP_DEV_ATTACHED)
/*
 * Eth-group manager API: count, per-group info, raw register read/write,
 * and speed lookup; all resolve the group via get_eth_group_dev().
 * NOTE(review): the device-not-found error returns are missing from this
 * listing.
 */
1205 int fme_mgr_get_eth_group_nums(struct ifpga_fme_hw *fme)
1207 return fme->nums_eth_dev;
1210 int fme_mgr_get_eth_group_info(struct ifpga_fme_hw *fme,
1211 u8 group_id, struct opae_eth_group_info *info)
1213 struct eth_group_device *dev;
1215 dev = get_eth_group_dev(fme, group_id);
1219 info->group_id = group_id;
1220 info->speed = dev->speed;
1221 info->nums_of_mac = dev->mac_num;
1222 info->nums_of_phy = dev->phy_num;
1227 int fme_mgr_eth_group_read_reg(struct ifpga_fme_hw *fme, u8 group_id,
1228 u8 type, u8 index, u16 addr, u32 *data)
1230 struct eth_group_device *dev;
1232 dev = get_eth_group_dev(fme, group_id);
1236 return eth_group_read_reg(dev, type, index, addr, data);
1239 int fme_mgr_eth_group_write_reg(struct ifpga_fme_hw *fme, u8 group_id,
1240 u8 type, u8 index, u16 addr, u32 data)
1242 struct eth_group_device *dev;
1244 dev = get_eth_group_dev(fme, group_id);
1248 return eth_group_write_reg(dev, type, index, addr, data);
/* Speed of one eth group; result lines are not visible in this listing. */
1251 static int fme_get_eth_group_speed(struct ifpga_fme_hw *fme,
1254 struct eth_group_device *dev;
1256 dev = get_eth_group_dev(fme, group_id);
/*
 * Retimer info: copy the board topology cached in fme->board_info and take
 * the supported speed from the line-side eth group (retimer speed matches
 * the eth group's). NOTE(review): no-max10 error return not visible here.
 */
1263 int fme_mgr_get_retimer_info(struct ifpga_fme_hw *fme,
1264 struct opae_retimer_info *info)
1266 struct intel_max10_device *dev;
1268 dev = (struct intel_max10_device *)fme->max10_dev;
1272 info->nums_retimer = fme->board_info.nums_of_retimer;
1273 info->ports_per_retimer = fme->board_info.ports_per_retimer;
1274 info->nums_fvl = fme->board_info.nums_of_fvl;
1275 info->ports_per_fvl = fme->board_info.ports_per_fvl;
1277 /* The speed of PKVL is identical the eth group's speed */
1278 info->support_speed = fme_get_eth_group_speed(fme,
1279 LINE_SIDE_GROUP_ID);
/*
 * Retimer status: link bitmap comes from the MAX10 PKVL_LINK_STATUS
 * register; speed again mirrors the line-side eth group.
 */
1284 int fme_mgr_get_retimer_status(struct ifpga_fme_hw *fme,
1285 struct opae_retimer_status *status)
1287 struct intel_max10_device *dev;
1290 dev = (struct intel_max10_device *)fme->max10_dev;
1294 if (max10_sys_read(PKVL_LINK_STATUS, &val)) {
1295 dev_err(dev, "%s: read pkvl status fail\n", __func__);
1299 /* The speed of PKVL is identical the eth group's speed */
1300 status->speed = fme_get_eth_group_speed(fme,
1301 LINE_SIDE_GROUP_ID);
1303 status->line_link_bitmap = val;
1305 dev_debug(dev, "get retimer status: speed:%d. line_link_bitmap:0x%x\n",
1307 status->line_link_bitmap);
1312 int fme_mgr_get_sensor_value(struct ifpga_fme_hw *fme,
1313 struct opae_sensor_info *sensor,
1314 unsigned int *value)
1316 struct intel_max10_device *dev;
1318 dev = (struct intel_max10_device *)fme->max10_dev;
1322 if (max10_sys_read(sensor->value_reg, value)) {
1323 dev_err(dev, "%s: read sensor value register 0x%x fail\n",
1324 __func__, sensor->value_reg);
1328 *value *= sensor->multiplier;