/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */
5 #include "ifpga_feature_dev.h"
7 #include "opae_intel_max10.h"
9 #include "opae_at24_eeprom.h"
10 #include "ifpga_sec_mgr.h"
12 #define PWR_THRESHOLD_MAX 0x7F
14 int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
16 struct ifpga_feature *feature;
21 feature = get_fme_feature_by_id(fme, prop->feature_id);
23 if (feature && feature->ops && feature->ops->get_prop)
24 return feature->ops->get_prop(feature, prop);
29 int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
31 struct ifpga_feature *feature;
36 feature = get_fme_feature_by_id(fme, prop->feature_id);
38 if (feature && feature->ops && feature->ops->set_prop)
39 return feature->ops->set_prop(feature, prop);
44 int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set)
46 struct ifpga_feature *feature;
51 feature = get_fme_feature_by_id(fme, feature_id);
53 if (feature && feature->ops && feature->ops->set_irq)
54 return feature->ops->set_irq(feature, irq_set);
59 /* fme private feature head */
60 static int fme_hdr_init(struct ifpga_feature *feature)
62 struct feature_fme_header *fme_hdr;
64 fme_hdr = (struct feature_fme_header *)feature->addr;
66 dev_info(NULL, "FME HDR Init.\n");
67 dev_info(NULL, "FME cap %llx.\n",
68 (unsigned long long)fme_hdr->capability.csr);
73 static void fme_hdr_uinit(struct ifpga_feature *feature)
77 dev_info(NULL, "FME HDR UInit.\n");
80 static int fme_hdr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
82 struct feature_fme_header *fme_hdr
83 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
84 struct feature_header header;
86 header.csr = readq(&fme_hdr->header);
87 *revision = header.revision;
92 static int fme_hdr_get_ports_num(struct ifpga_fme_hw *fme, u64 *ports_num)
94 struct feature_fme_header *fme_hdr
95 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
96 struct feature_fme_capability fme_capability;
98 fme_capability.csr = readq(&fme_hdr->capability);
99 *ports_num = fme_capability.num_ports;
/*
 * Decode the type of one port.  The port index is packed in the upper
 * 32 bits of *port_type on entry; the result is written back through the
 * same pointer.
 * NOTE(review): this view of the file is truncated — the statements after
 * the port_implemented and afu_access_control tests (the error return and
 * the per-branch assignments) are not visible here; confirm against the
 * full source before modifying.
 */
104 static int fme_hdr_get_port_type(struct ifpga_fme_hw *fme, u64 *port_type)
106 struct feature_fme_header *fme_hdr
107 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
108 struct feature_fme_port pt;
109 u32 port = (u32)((*port_type >> 32) & 0xffffffff);
111 pt.csr = readq(&fme_hdr->port[port]);
112 if (!pt.port_implemented)
114 if (pt.afu_access_control)
122 static int fme_hdr_get_cache_size(struct ifpga_fme_hw *fme, u64 *cache_size)
124 struct feature_fme_header *fme_hdr
125 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
126 struct feature_fme_capability fme_capability;
128 fme_capability.csr = readq(&fme_hdr->capability);
129 *cache_size = fme_capability.cache_size;
134 static int fme_hdr_get_version(struct ifpga_fme_hw *fme, u64 *version)
136 struct feature_fme_header *fme_hdr
137 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
138 struct feature_fme_capability fme_capability;
140 fme_capability.csr = readq(&fme_hdr->capability);
141 *version = fme_capability.fabric_verid;
146 static int fme_hdr_get_socket_id(struct ifpga_fme_hw *fme, u64 *socket_id)
148 struct feature_fme_header *fme_hdr
149 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
150 struct feature_fme_capability fme_capability;
152 fme_capability.csr = readq(&fme_hdr->capability);
153 *socket_id = fme_capability.socket_id;
158 static int fme_hdr_get_bitstream_id(struct ifpga_fme_hw *fme,
161 struct feature_fme_header *fme_hdr
162 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
164 *bitstream_id = readq(&fme_hdr->bitstream_id);
169 static int fme_hdr_get_bitstream_metadata(struct ifpga_fme_hw *fme,
170 u64 *bitstream_metadata)
172 struct feature_fme_header *fme_hdr
173 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
175 *bitstream_metadata = readq(&fme_hdr->bitstream_md);
181 fme_hdr_get_prop(struct ifpga_feature *feature, struct feature_prop *prop)
183 struct ifpga_fme_hw *fme = feature->parent;
185 switch (prop->prop_id) {
186 case FME_HDR_PROP_REVISION:
187 return fme_hdr_get_revision(fme, &prop->data);
188 case FME_HDR_PROP_PORTS_NUM:
189 return fme_hdr_get_ports_num(fme, &prop->data);
190 case FME_HDR_PROP_CACHE_SIZE:
191 return fme_hdr_get_cache_size(fme, &prop->data);
192 case FME_HDR_PROP_VERSION:
193 return fme_hdr_get_version(fme, &prop->data);
194 case FME_HDR_PROP_SOCKET_ID:
195 return fme_hdr_get_socket_id(fme, &prop->data);
196 case FME_HDR_PROP_BITSTREAM_ID:
197 return fme_hdr_get_bitstream_id(fme, &prop->data);
198 case FME_HDR_PROP_BITSTREAM_METADATA:
199 return fme_hdr_get_bitstream_metadata(fme, &prop->data);
200 case FME_HDR_PROP_PORT_TYPE:
201 return fme_hdr_get_port_type(fme, &prop->data);
207 struct ifpga_feature_ops fme_hdr_ops = {
208 .init = fme_hdr_init,
209 .uinit = fme_hdr_uinit,
210 .get_prop = fme_hdr_get_prop,
213 /* thermal management */
214 static int fme_thermal_get_threshold1(struct ifpga_fme_hw *fme, u64 *thres1)
216 struct feature_fme_thermal *thermal;
217 struct feature_fme_tmp_threshold temp_threshold;
219 thermal = get_fme_feature_ioaddr_by_index(fme,
220 FME_FEATURE_ID_THERMAL_MGMT);
222 temp_threshold.csr = readq(&thermal->threshold);
223 *thres1 = temp_threshold.tmp_thshold1;
228 static int fme_thermal_set_threshold1(struct ifpga_fme_hw *fme, u64 thres1)
230 struct feature_fme_thermal *thermal;
231 struct feature_fme_header *fme_hdr;
232 struct feature_fme_tmp_threshold tmp_threshold;
233 struct feature_fme_capability fme_capability;
235 thermal = get_fme_feature_ioaddr_by_index(fme,
236 FME_FEATURE_ID_THERMAL_MGMT);
237 fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
239 spinlock_lock(&fme->lock);
240 tmp_threshold.csr = readq(&thermal->threshold);
241 fme_capability.csr = readq(&fme_hdr->capability);
243 if (fme_capability.lock_bit == 1) {
244 spinlock_unlock(&fme->lock);
246 } else if (thres1 > 100) {
247 spinlock_unlock(&fme->lock);
249 } else if (thres1 == 0) {
250 tmp_threshold.tmp_thshold1_enable = 0;
251 tmp_threshold.tmp_thshold1 = thres1;
253 tmp_threshold.tmp_thshold1_enable = 1;
254 tmp_threshold.tmp_thshold1 = thres1;
257 writeq(tmp_threshold.csr, &thermal->threshold);
258 spinlock_unlock(&fme->lock);
263 static int fme_thermal_get_threshold2(struct ifpga_fme_hw *fme, u64 *thres2)
265 struct feature_fme_thermal *thermal;
266 struct feature_fme_tmp_threshold temp_threshold;
268 thermal = get_fme_feature_ioaddr_by_index(fme,
269 FME_FEATURE_ID_THERMAL_MGMT);
271 temp_threshold.csr = readq(&thermal->threshold);
272 *thres2 = temp_threshold.tmp_thshold2;
277 static int fme_thermal_set_threshold2(struct ifpga_fme_hw *fme, u64 thres2)
279 struct feature_fme_thermal *thermal;
280 struct feature_fme_header *fme_hdr;
281 struct feature_fme_tmp_threshold tmp_threshold;
282 struct feature_fme_capability fme_capability;
284 thermal = get_fme_feature_ioaddr_by_index(fme,
285 FME_FEATURE_ID_THERMAL_MGMT);
286 fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
288 spinlock_lock(&fme->lock);
289 tmp_threshold.csr = readq(&thermal->threshold);
290 fme_capability.csr = readq(&fme_hdr->capability);
292 if (fme_capability.lock_bit == 1) {
293 spinlock_unlock(&fme->lock);
295 } else if (thres2 > 100) {
296 spinlock_unlock(&fme->lock);
298 } else if (thres2 == 0) {
299 tmp_threshold.tmp_thshold2_enable = 0;
300 tmp_threshold.tmp_thshold2 = thres2;
302 tmp_threshold.tmp_thshold2_enable = 1;
303 tmp_threshold.tmp_thshold2 = thres2;
306 writeq(tmp_threshold.csr, &thermal->threshold);
307 spinlock_unlock(&fme->lock);
312 static int fme_thermal_get_threshold_trip(struct ifpga_fme_hw *fme,
315 struct feature_fme_thermal *thermal;
316 struct feature_fme_tmp_threshold temp_threshold;
318 thermal = get_fme_feature_ioaddr_by_index(fme,
319 FME_FEATURE_ID_THERMAL_MGMT);
321 temp_threshold.csr = readq(&thermal->threshold);
322 *thres_trip = temp_threshold.therm_trip_thshold;
327 static int fme_thermal_get_threshold1_reached(struct ifpga_fme_hw *fme,
330 struct feature_fme_thermal *thermal;
331 struct feature_fme_tmp_threshold temp_threshold;
333 thermal = get_fme_feature_ioaddr_by_index(fme,
334 FME_FEATURE_ID_THERMAL_MGMT);
336 temp_threshold.csr = readq(&thermal->threshold);
337 *thres1_reached = temp_threshold.thshold1_status;
342 static int fme_thermal_get_threshold2_reached(struct ifpga_fme_hw *fme,
345 struct feature_fme_thermal *thermal;
346 struct feature_fme_tmp_threshold temp_threshold;
348 thermal = get_fme_feature_ioaddr_by_index(fme,
349 FME_FEATURE_ID_THERMAL_MGMT);
351 temp_threshold.csr = readq(&thermal->threshold);
352 *thres1_reached = temp_threshold.thshold2_status;
357 static int fme_thermal_get_threshold1_policy(struct ifpga_fme_hw *fme,
360 struct feature_fme_thermal *thermal;
361 struct feature_fme_tmp_threshold temp_threshold;
363 thermal = get_fme_feature_ioaddr_by_index(fme,
364 FME_FEATURE_ID_THERMAL_MGMT);
366 temp_threshold.csr = readq(&thermal->threshold);
367 *thres1_policy = temp_threshold.thshold_policy;
372 static int fme_thermal_set_threshold1_policy(struct ifpga_fme_hw *fme,
375 struct feature_fme_thermal *thermal;
376 struct feature_fme_tmp_threshold tmp_threshold;
378 thermal = get_fme_feature_ioaddr_by_index(fme,
379 FME_FEATURE_ID_THERMAL_MGMT);
381 spinlock_lock(&fme->lock);
382 tmp_threshold.csr = readq(&thermal->threshold);
384 if (thres1_policy == 0) {
385 tmp_threshold.thshold_policy = 0;
386 } else if (thres1_policy == 1) {
387 tmp_threshold.thshold_policy = 1;
389 spinlock_unlock(&fme->lock);
393 writeq(tmp_threshold.csr, &thermal->threshold);
394 spinlock_unlock(&fme->lock);
399 static int fme_thermal_get_temperature(struct ifpga_fme_hw *fme, u64 *temp)
401 struct feature_fme_thermal *thermal;
402 struct feature_fme_temp_rdsensor_fmt1 temp_rdsensor_fmt1;
404 thermal = get_fme_feature_ioaddr_by_index(fme,
405 FME_FEATURE_ID_THERMAL_MGMT);
407 temp_rdsensor_fmt1.csr = readq(&thermal->rdsensor_fm1);
408 *temp = temp_rdsensor_fmt1.fpga_temp;
413 static int fme_thermal_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
415 struct feature_fme_thermal *fme_thermal
416 = get_fme_feature_ioaddr_by_index(fme,
417 FME_FEATURE_ID_THERMAL_MGMT);
418 struct feature_header header;
420 header.csr = readq(&fme_thermal->header);
421 *revision = header.revision;
426 #define FME_THERMAL_CAP_NO_TMP_THRESHOLD 0x1
428 static int fme_thermal_mgmt_init(struct ifpga_feature *feature)
430 struct feature_fme_thermal *fme_thermal;
431 struct feature_fme_tmp_threshold_cap thermal_cap;
435 dev_info(NULL, "FME thermal mgmt Init.\n");
437 fme_thermal = (struct feature_fme_thermal *)feature->addr;
438 thermal_cap.csr = readq(&fme_thermal->threshold_cap);
440 dev_info(NULL, "FME thermal cap %llx.\n",
441 (unsigned long long)fme_thermal->threshold_cap.csr);
443 if (thermal_cap.tmp_thshold_disabled)
444 feature->cap |= FME_THERMAL_CAP_NO_TMP_THRESHOLD;
449 static void fme_thermal_mgmt_uinit(struct ifpga_feature *feature)
453 dev_info(NULL, "FME thermal mgmt UInit.\n");
457 fme_thermal_set_prop(struct ifpga_feature *feature, struct feature_prop *prop)
459 struct ifpga_fme_hw *fme = feature->parent;
461 if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD)
464 switch (prop->prop_id) {
465 case FME_THERMAL_PROP_THRESHOLD1:
466 return fme_thermal_set_threshold1(fme, prop->data);
467 case FME_THERMAL_PROP_THRESHOLD2:
468 return fme_thermal_set_threshold2(fme, prop->data);
469 case FME_THERMAL_PROP_THRESHOLD1_POLICY:
470 return fme_thermal_set_threshold1_policy(fme, prop->data);
477 fme_thermal_get_prop(struct ifpga_feature *feature, struct feature_prop *prop)
479 struct ifpga_fme_hw *fme = feature->parent;
481 if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD &&
482 prop->prop_id != FME_THERMAL_PROP_TEMPERATURE &&
483 prop->prop_id != FME_THERMAL_PROP_REVISION)
486 switch (prop->prop_id) {
487 case FME_THERMAL_PROP_THRESHOLD1:
488 return fme_thermal_get_threshold1(fme, &prop->data);
489 case FME_THERMAL_PROP_THRESHOLD2:
490 return fme_thermal_get_threshold2(fme, &prop->data);
491 case FME_THERMAL_PROP_THRESHOLD_TRIP:
492 return fme_thermal_get_threshold_trip(fme, &prop->data);
493 case FME_THERMAL_PROP_THRESHOLD1_REACHED:
494 return fme_thermal_get_threshold1_reached(fme, &prop->data);
495 case FME_THERMAL_PROP_THRESHOLD2_REACHED:
496 return fme_thermal_get_threshold2_reached(fme, &prop->data);
497 case FME_THERMAL_PROP_THRESHOLD1_POLICY:
498 return fme_thermal_get_threshold1_policy(fme, &prop->data);
499 case FME_THERMAL_PROP_TEMPERATURE:
500 return fme_thermal_get_temperature(fme, &prop->data);
501 case FME_THERMAL_PROP_REVISION:
502 return fme_thermal_get_revision(fme, &prop->data);
508 struct ifpga_feature_ops fme_thermal_mgmt_ops = {
509 .init = fme_thermal_mgmt_init,
510 .uinit = fme_thermal_mgmt_uinit,
511 .get_prop = fme_thermal_get_prop,
512 .set_prop = fme_thermal_set_prop,
515 static int fme_pwr_get_consumed(struct ifpga_fme_hw *fme, u64 *consumed)
517 struct feature_fme_power *fme_power
518 = get_fme_feature_ioaddr_by_index(fme,
519 FME_FEATURE_ID_POWER_MGMT);
520 struct feature_fme_pm_status pm_status;
522 pm_status.csr = readq(&fme_power->status);
524 *consumed = pm_status.pwr_consumed;
529 static int fme_pwr_get_threshold1(struct ifpga_fme_hw *fme, u64 *threshold)
531 struct feature_fme_power *fme_power
532 = get_fme_feature_ioaddr_by_index(fme,
533 FME_FEATURE_ID_POWER_MGMT);
534 struct feature_fme_pm_ap_threshold pm_ap_threshold;
536 pm_ap_threshold.csr = readq(&fme_power->threshold);
538 *threshold = pm_ap_threshold.threshold1;
543 static int fme_pwr_set_threshold1(struct ifpga_fme_hw *fme, u64 threshold)
545 struct feature_fme_power *fme_power
546 = get_fme_feature_ioaddr_by_index(fme,
547 FME_FEATURE_ID_POWER_MGMT);
548 struct feature_fme_pm_ap_threshold pm_ap_threshold;
550 spinlock_lock(&fme->lock);
551 pm_ap_threshold.csr = readq(&fme_power->threshold);
553 if (threshold <= PWR_THRESHOLD_MAX) {
554 pm_ap_threshold.threshold1 = threshold;
556 spinlock_unlock(&fme->lock);
560 writeq(pm_ap_threshold.csr, &fme_power->threshold);
561 spinlock_unlock(&fme->lock);
566 static int fme_pwr_get_threshold2(struct ifpga_fme_hw *fme, u64 *threshold)
568 struct feature_fme_power *fme_power
569 = get_fme_feature_ioaddr_by_index(fme,
570 FME_FEATURE_ID_POWER_MGMT);
571 struct feature_fme_pm_ap_threshold pm_ap_threshold;
573 pm_ap_threshold.csr = readq(&fme_power->threshold);
575 *threshold = pm_ap_threshold.threshold2;
580 static int fme_pwr_set_threshold2(struct ifpga_fme_hw *fme, u64 threshold)
582 struct feature_fme_power *fme_power
583 = get_fme_feature_ioaddr_by_index(fme,
584 FME_FEATURE_ID_POWER_MGMT);
585 struct feature_fme_pm_ap_threshold pm_ap_threshold;
587 spinlock_lock(&fme->lock);
588 pm_ap_threshold.csr = readq(&fme_power->threshold);
590 if (threshold <= PWR_THRESHOLD_MAX) {
591 pm_ap_threshold.threshold2 = threshold;
593 spinlock_unlock(&fme->lock);
597 writeq(pm_ap_threshold.csr, &fme_power->threshold);
598 spinlock_unlock(&fme->lock);
603 static int fme_pwr_get_threshold1_status(struct ifpga_fme_hw *fme,
604 u64 *threshold_status)
606 struct feature_fme_power *fme_power
607 = get_fme_feature_ioaddr_by_index(fme,
608 FME_FEATURE_ID_POWER_MGMT);
609 struct feature_fme_pm_ap_threshold pm_ap_threshold;
611 pm_ap_threshold.csr = readq(&fme_power->threshold);
613 *threshold_status = pm_ap_threshold.threshold1_status;
618 static int fme_pwr_get_threshold2_status(struct ifpga_fme_hw *fme,
619 u64 *threshold_status)
621 struct feature_fme_power *fme_power
622 = get_fme_feature_ioaddr_by_index(fme,
623 FME_FEATURE_ID_POWER_MGMT);
624 struct feature_fme_pm_ap_threshold pm_ap_threshold;
626 pm_ap_threshold.csr = readq(&fme_power->threshold);
628 *threshold_status = pm_ap_threshold.threshold2_status;
633 static int fme_pwr_get_rtl(struct ifpga_fme_hw *fme, u64 *rtl)
635 struct feature_fme_power *fme_power
636 = get_fme_feature_ioaddr_by_index(fme,
637 FME_FEATURE_ID_POWER_MGMT);
638 struct feature_fme_pm_status pm_status;
640 pm_status.csr = readq(&fme_power->status);
642 *rtl = pm_status.fpga_latency_report;
647 static int fme_pwr_get_xeon_limit(struct ifpga_fme_hw *fme, u64 *limit)
649 struct feature_fme_power *fme_power
650 = get_fme_feature_ioaddr_by_index(fme,
651 FME_FEATURE_ID_POWER_MGMT);
652 struct feature_fme_pm_xeon_limit xeon_limit;
654 xeon_limit.csr = readq(&fme_power->xeon_limit);
656 if (!xeon_limit.enable)
657 xeon_limit.pwr_limit = 0;
659 *limit = xeon_limit.pwr_limit;
664 static int fme_pwr_get_fpga_limit(struct ifpga_fme_hw *fme, u64 *limit)
666 struct feature_fme_power *fme_power
667 = get_fme_feature_ioaddr_by_index(fme,
668 FME_FEATURE_ID_POWER_MGMT);
669 struct feature_fme_pm_fpga_limit fpga_limit;
671 fpga_limit.csr = readq(&fme_power->fpga_limit);
673 if (!fpga_limit.enable)
674 fpga_limit.pwr_limit = 0;
676 *limit = fpga_limit.pwr_limit;
681 static int fme_pwr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
683 struct feature_fme_power *fme_power
684 = get_fme_feature_ioaddr_by_index(fme,
685 FME_FEATURE_ID_POWER_MGMT);
686 struct feature_header header;
688 header.csr = readq(&fme_power->header);
689 *revision = header.revision;
694 static int fme_power_mgmt_init(struct ifpga_feature *feature)
698 dev_info(NULL, "FME power mgmt Init.\n");
703 static void fme_power_mgmt_uinit(struct ifpga_feature *feature)
707 dev_info(NULL, "FME power mgmt UInit.\n");
710 static int fme_power_mgmt_get_prop(struct ifpga_feature *feature,
711 struct feature_prop *prop)
713 struct ifpga_fme_hw *fme = feature->parent;
715 switch (prop->prop_id) {
716 case FME_PWR_PROP_CONSUMED:
717 return fme_pwr_get_consumed(fme, &prop->data);
718 case FME_PWR_PROP_THRESHOLD1:
719 return fme_pwr_get_threshold1(fme, &prop->data);
720 case FME_PWR_PROP_THRESHOLD2:
721 return fme_pwr_get_threshold2(fme, &prop->data);
722 case FME_PWR_PROP_THRESHOLD1_STATUS:
723 return fme_pwr_get_threshold1_status(fme, &prop->data);
724 case FME_PWR_PROP_THRESHOLD2_STATUS:
725 return fme_pwr_get_threshold2_status(fme, &prop->data);
726 case FME_PWR_PROP_RTL:
727 return fme_pwr_get_rtl(fme, &prop->data);
728 case FME_PWR_PROP_XEON_LIMIT:
729 return fme_pwr_get_xeon_limit(fme, &prop->data);
730 case FME_PWR_PROP_FPGA_LIMIT:
731 return fme_pwr_get_fpga_limit(fme, &prop->data);
732 case FME_PWR_PROP_REVISION:
733 return fme_pwr_get_revision(fme, &prop->data);
739 static int fme_power_mgmt_set_prop(struct ifpga_feature *feature,
740 struct feature_prop *prop)
742 struct ifpga_fme_hw *fme = feature->parent;
744 switch (prop->prop_id) {
745 case FME_PWR_PROP_THRESHOLD1:
746 return fme_pwr_set_threshold1(fme, prop->data);
747 case FME_PWR_PROP_THRESHOLD2:
748 return fme_pwr_set_threshold2(fme, prop->data);
754 struct ifpga_feature_ops fme_power_mgmt_ops = {
755 .init = fme_power_mgmt_init,
756 .uinit = fme_power_mgmt_uinit,
757 .get_prop = fme_power_mgmt_get_prop,
758 .set_prop = fme_power_mgmt_set_prop,
/* HSSI ethernet feature needs no init work; placeholder hook. */
static int fme_hssi_eth_init(struct ifpga_feature *feature)
{
	UNUSED(feature);
	return 0;
}
/* HSSI ethernet feature needs no teardown; placeholder hook. */
static void fme_hssi_eth_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);
}
772 struct ifpga_feature_ops fme_hssi_eth_ops = {
773 .init = fme_hssi_eth_init,
774 .uinit = fme_hssi_eth_uinit,
/* EMIF feature needs no init work; placeholder hook. */
static int fme_emif_init(struct ifpga_feature *feature)
{
	UNUSED(feature);
	return 0;
}
/* EMIF feature needs no teardown; placeholder hook. */
static void fme_emif_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);
}
788 struct ifpga_feature_ops fme_emif_ops = {
789 .init = fme_emif_init,
790 .uinit = fme_emif_uinit,
/*
 * Human-readable names for the board interface type and board major
 * version fields of the bitstream id.
 * NOTE(review): this view of the file is truncated — only a subset of the
 * switch cases (and no default) is visible for either helper; consult the
 * full source before editing.
 */
793 static const char *board_type_to_string(u32 type)
803 return "VC_4x25G+2x25G";
811 static const char *board_major_to_string(u32 major)
815 return "VISTA_CREEK";
819 return "DARBY_CREEK";
/*
 * Translate a board interface type into retimer/FVL topology counts in
 * @info (number of retimers, ports per retimer, number of FVLs, ports
 * per FVL).
 * NOTE(review): truncated view — the switch labels selecting each group
 * of assignments, the default branch, and the return paths are not
 * visible here.
 */
825 static int board_type_to_info(u32 type,
826 struct opae_board_info *info)
830 info->nums_of_retimer = 2;
831 info->ports_per_retimer = 4;
832 info->nums_of_fvl = 2;
833 info->ports_per_fvl = 4;
836 info->nums_of_retimer = 1;
837 info->ports_per_retimer = 4;
838 info->nums_of_fvl = 2;
839 info->ports_per_fvl = 2;
842 info->nums_of_retimer = 2;
843 info->ports_per_retimer = 1;
844 info->nums_of_fvl = 1;
845 info->ports_per_fvl = 2;
848 info->nums_of_retimer = 2;
849 info->ports_per_retimer = 2;
850 info->nums_of_fvl = 2;
851 info->ports_per_fvl = 2;
/*
 * Decode the bitstream id into fme->board_info (board major/minor,
 * interface type, feature support flags), log the findings, derive the
 * port topology via board_type_to_info(), and read boot page / MAX10 /
 * NIOS firmware versions from the MAX10 BMC.
 * NOTE(review): truncated view — local declarations (hw, val), the
 * error-return statements after each failing call, and the full dev_info
 * format strings are not visible here.  `id.lightweiht` / `id.disagregate`
 * are (misspelled) field names of the externally-defined struct
 * fme_bitstream_id and must not be "fixed" locally.
 */
860 static int fme_get_board_interface(struct ifpga_fme_hw *fme)
862 struct fme_bitstream_id id;
870 if (fme_hdr_get_bitstream_id(fme, &id.id))
873 fme->board_info.major = id.major;
874 fme->board_info.minor = id.minor;
875 fme->board_info.type = id.interface;
876 fme->board_info.fvl_bypass = id.fvl_bypass;
877 fme->board_info.mac_lightweight = id.mac_lightweight;
878 fme->board_info.lightweight = id.lightweiht;
879 fme->board_info.disaggregate = id.disagregate;
880 fme->board_info.seu = id.seu;
881 fme->board_info.ptp = id.ptp;
883 dev_info(fme, "found: PCI dev: %02x:%02x:%x board: %s type: %s\n",
886 hw->pci_data->function,
887 board_major_to_string(fme->board_info.major),
888 board_type_to_string(fme->board_info.type));
890 dev_info(fme, "support feature:\n"
892 "mac_lightweight:%s\n"
897 check_support(fme->board_info.fvl_bypass),
898 check_support(fme->board_info.mac_lightweight),
899 check_support(fme->board_info.lightweight),
900 check_support(fme->board_info.disaggregate),
901 check_support(fme->board_info.seu),
902 check_support(fme->board_info.ptp));
905 if (board_type_to_info(fme->board_info.type, &fme->board_info))
908 dev_info(fme, "get board info: nums_retimers %d ports_per_retimer %d nums_fvl %d ports_per_fvl %d\n",
909 fme->board_info.nums_of_retimer,
910 fme->board_info.ports_per_retimer,
911 fme->board_info.nums_of_fvl,
912 fme->board_info.ports_per_fvl);
914 if (max10_sys_read(fme->max10_dev, FPGA_PAGE_INFO, &val))
916 fme->board_info.boot_page = val & 0x7;
918 if (max10_sys_read(fme->max10_dev, MAX10_BUILD_VER, &val))
920 fme->board_info.max10_version = val;
922 if (max10_sys_read(fme->max10_dev, NIOS2_FW_VERSION, &val))
924 fme->board_info.nios_fw_version = val;
926 dev_info(fme, "max10 version 0x%x, nios fw version 0x%x\n",
927 fme->board_info.max10_version,
928 fme->board_info.nios_fw_version);
933 static int spi_self_checking(struct intel_max10_device *dev)
938 ret = max10_sys_read(dev, MAX10_TEST_REG, &val);
942 dev_info(NULL, "Read MAX10 test register 0x%x\n", val);
/*
 * Point the SPI device's mutex and DTB bookkeeping at the multi-process
 * shared-memory area when the adapter exposes one; otherwise fall back
 * to per-process state.
 * NOTE(review): truncated view — the tail of the shared-memory branch and
 * the else branch (single-process fallback that leaves the pointers NULL)
 * are only partially visible here.
 */
947 static void init_spi_share_data(struct ifpga_fme_hw *fme,
948 struct altera_spi_device *spi)
950 struct ifpga_hw *hw = (struct ifpga_hw *)fme->parent;
951 opae_share_data *sd = NULL;
953 if (hw && hw->adapter && hw->adapter->shm.ptr) {
954 dev_info(NULL, "transfer share data to spi\n");
955 sd = (opae_share_data *)hw->adapter->shm.ptr;
956 spi->mutex = &sd->spi_mutex;
957 spi->dtb_sz_ptr = &sd->dtb_size;
961 spi->dtb_sz_ptr = NULL;
966 static int fme_spi_init(struct ifpga_feature *feature)
968 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
969 struct altera_spi_device *spi_master;
970 struct intel_max10_device *max10;
973 dev_info(fme, "FME SPI Master (Max10) Init.\n");
974 dev_debug(fme, "FME SPI base addr %p.\n",
976 dev_debug(fme, "spi param=0x%llx\n",
977 (unsigned long long)opae_readq(feature->addr + 0x8));
979 spi_master = altera_spi_alloc(feature->addr, TYPE_SPI);
982 init_spi_share_data(fme, spi_master);
984 altera_spi_init(spi_master);
986 max10 = intel_max10_device_probe(spi_master, 0);
989 dev_err(fme, "max10 init fail\n");
993 fme->max10_dev = max10;
996 if (spi_self_checking(max10)) {
1004 intel_max10_device_remove(fme->max10_dev);
1006 altera_spi_release(spi_master);
/*
 * Tear down the MAX10 device created by fme_spi_init().
 * NOTE(review): truncated view — one statement between the declaration
 * and intel_max10_device_remove() is not visible here; confirm against
 * the full source before modifying.
 */
1010 static void fme_spi_uinit(struct ifpga_feature *feature)
1012 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1015 intel_max10_device_remove(fme->max10_dev);
1018 struct ifpga_feature_ops fme_spi_master_ops = {
1019 .init = fme_spi_init,
1020 .uinit = fme_spi_uinit,
/*
 * Wait for the A10 NIOS firmware to finish board initialization and hand
 * the SPI master to the host.  For firmware major version >= 3 this also
 * kicks off PKVL initialization with the default FEC mode (unless already
 * started/done) and polls NIOS_INIT until the done bit is set or a 10 s
 * timeout elapses; the resulting FEC mode is then read back and logged.
 * NOTE(review): truncated view — the variable declarations, the polling
 * loop structure, and the error/timeout return statements are not fully
 * visible here.
 */
1023 static int nios_spi_wait_init_done(struct altera_spi_device *dev)
1026 unsigned long timeout = rte_get_timer_cycles() +
1027 msecs_to_timer_cycles(10000);
1028 unsigned long ticks;
1030 int fecmode = FEC_MODE_NO;
1032 if (spi_reg_read(dev, NIOS_VERSION, &val))
1036 (val & NIOS_VERSION_MAJOR) >> NIOS_VERSION_MAJOR_SHIFT;
1037 dev_info(dev, "A10 NIOS FW version %d\n", major_version);
1039 if (major_version >= 3) {
1040 /* read NIOS_INIT to check if PKVL INIT done or not */
1041 if (spi_reg_read(dev, NIOS_INIT, &val))
1044 dev_debug(dev, "read NIOS_INIT: 0x%x\n", val)
1046 /* check if PKVLs are initialized already */
1047 if (val & NIOS_INIT_DONE || val & NIOS_INIT_START)
1048 goto nios_init_done;
1050 /* start to config the default FEC mode */
1051 val = fecmode | NIOS_INIT_START;
1053 if (spi_reg_write(dev, NIOS_INIT, val))
1059 if (spi_reg_read(dev, NIOS_INIT, &val))
1061 if (val & NIOS_INIT_DONE)
1064 ticks = rte_get_timer_cycles();
1065 if (time_after(ticks, timeout))
1070 /* get the fecmode */
1071 if (spi_reg_read(dev, NIOS_INIT, &val))
1073 dev_debug(dev, "read NIOS_INIT: 0x%x\n", val);
1074 fecmode = (val & REQ_FEC_MODE) >> REQ_FEC_MODE_SHIFT;
1075 dev_info(dev, "fecmode: 0x%x, %s\n", fecmode,
1076 (fecmode == FEC_MODE_KR) ? "kr" :
1077 ((fecmode == FEC_MODE_RS) ? "rs" : "no"));
/*
 * Read the PKVL A/B mode status registers via the NIOS SPI bridge and
 * report whether either retimer came up in an error state.
 * NOTE(review): truncated view — the declaration of `value`, the error
 * returns after each failed read, the status-value range checks, and the
 * final return are not visible here.
 */
1082 static int nios_spi_check_error(struct altera_spi_device *dev)
1086 if (spi_reg_read(dev, PKVL_A_MODE_STS, &value))
1089 dev_debug(dev, "PKVL A Mode Status 0x%x\n", value);
1094 if (spi_reg_read(dev, PKVL_B_MODE_STS, &value))
1097 dev_debug(dev, "PKVL B Mode Status 0x%x\n", value);
/*
 * Bring up the NIOS-mediated SPI master and the MAX10 BMC behind it:
 * (1) under the (optional multi-process) SPI mutex, wait for the A10
 *     NIOS firmware to finish init and release the bus to the host;
 * (2) check the PKVL status registers and warn on error;
 * (3) init the SPI master, probe the MAX10 device, read board info,
 *     expose the MAX10 sensor list through the opae manager, self-check
 *     the SPI link, and init the security manager.
 * On failure the created devices are unwound in reverse order.
 * NOTE(review): truncated view — the hw/mgr NULL checks, several error
 * returns, and the cleanup labels between intel_max10_device_remove()
 * and altera_spi_release() are not visible here.
 */
1105 static int fme_nios_spi_init(struct ifpga_feature *feature)
1107 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1108 struct altera_spi_device *spi_master;
1109 struct intel_max10_device *max10;
1110 struct ifpga_hw *hw;
1111 struct opae_manager *mgr;
1118 mgr = hw->adapter->mgr;
1122 dev_info(fme, "FME SPI Master (NIOS) Init.\n");
1123 dev_debug(fme, "FME SPI base addr %p.\n",
1125 dev_debug(fme, "spi param=0x%llx\n",
1126 (unsigned long long)opae_readq(feature->addr + 0x8));
1128 spi_master = altera_spi_alloc(feature->addr, TYPE_NIOS_SPI);
1131 init_spi_share_data(fme, spi_master);
1134 * 1. wait A10 NIOS initial finished and
1135 * release the SPI master to Host
1137 if (spi_master->mutex)
1138 pthread_mutex_lock(spi_master->mutex);
1140 ret = nios_spi_wait_init_done(spi_master);
1142 dev_err(fme, "FME NIOS_SPI init fail\n");
1143 if (spi_master->mutex)
1144 pthread_mutex_unlock(spi_master->mutex);
1148 dev_info(fme, "FME NIOS_SPI initial done\n");
1150 /* 2. check if error occur? */
1151 if (nios_spi_check_error(spi_master))
1152 dev_info(fme, "NIOS_SPI INIT done, but found some error\n");
1154 if (spi_master->mutex)
1155 pthread_mutex_unlock(spi_master->mutex);
1157 /* 3. init the spi master*/
1158 altera_spi_init(spi_master);
1160 /* init the max10 device */
1161 max10 = intel_max10_device_probe(spi_master, 0);
1164 dev_err(fme, "max10 init fail\n");
1168 fme->max10_dev = max10;
1170 max10->bus = hw->pci_data->bus;
1172 fme_get_board_interface(fme);
1174 mgr->sensor_list = &max10->opae_sensor_list;
1177 if (spi_self_checking(max10))
1180 ret = init_sec_mgr(fme);
1182 dev_err(fme, "security manager init fail\n");
1189 intel_max10_device_remove(fme->max10_dev);
1191 altera_spi_release(spi_master);
1195 static void fme_nios_spi_uinit(struct ifpga_feature *feature)
1197 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1199 release_sec_mgr(fme);
1201 intel_max10_device_remove(fme->max10_dev);
1204 struct ifpga_feature_ops fme_nios_spi_master_ops = {
1205 .init = fme_nios_spi_init,
1206 .uinit = fme_nios_spi_uinit,
1209 static int i2c_mac_rom_test(struct altera_i2c_dev *dev)
1213 char read_buf[20] = {0,};
1214 const char *string = "1a2b3c4d5e";
1216 opae_memcpy(buf, string, strlen(string));
1218 ret = at24_eeprom_write(dev, AT24512_SLAVE_ADDR, 0,
1219 (u8 *)buf, strlen(string));
1221 dev_err(NULL, "write i2c error:%d\n", ret);
1225 ret = at24_eeprom_read(dev, AT24512_SLAVE_ADDR, 0,
1226 (u8 *)read_buf, strlen(string));
1228 dev_err(NULL, "read i2c error:%d\n", ret);
1232 if (memcmp(buf, read_buf, strlen(string))) {
1233 dev_err(NULL, "%s test fail!\n", __func__);
1237 dev_info(NULL, "%s test successful\n", __func__);
1242 static void init_i2c_mutex(struct ifpga_fme_hw *fme)
1244 struct ifpga_hw *hw = (struct ifpga_hw *)fme->parent;
1245 struct altera_i2c_dev *i2c_dev;
1246 opae_share_data *sd = NULL;
1248 if (fme->i2c_master) {
1249 i2c_dev = (struct altera_i2c_dev *)fme->i2c_master;
1250 if (hw && hw->adapter && hw->adapter->shm.ptr) {
1251 dev_info(NULL, "use multi-process mutex in i2c\n");
1252 sd = (opae_share_data *)hw->adapter->shm.ptr;
1253 i2c_dev->mutex = &sd->i2c_mutex;
1255 dev_info(NULL, "use multi-thread mutex in i2c\n");
1256 i2c_dev->mutex = &i2c_dev->lock;
1261 static int fme_i2c_init(struct ifpga_feature *feature)
1263 struct feature_fme_i2c *i2c;
1264 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1266 i2c = (struct feature_fme_i2c *)feature->addr;
1268 dev_info(NULL, "FME I2C Master Init.\n");
1270 fme->i2c_master = altera_i2c_probe(i2c);
1271 if (!fme->i2c_master)
1274 init_i2c_mutex(fme);
1276 /* MAC ROM self test */
1277 i2c_mac_rom_test(fme->i2c_master);
1282 static void fme_i2c_uninit(struct ifpga_feature *feature)
1284 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1286 altera_i2c_remove(fme->i2c_master);
1289 struct ifpga_feature_ops fme_i2c_master_ops = {
1290 .init = fme_i2c_init,
1291 .uinit = fme_i2c_uninit,
1294 static int fme_eth_group_init(struct ifpga_feature *feature)
1296 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1297 struct eth_group_device *dev;
1299 dev = (struct eth_group_device *)eth_group_probe(feature->addr);
1303 fme->eth_dev[dev->group_id] = dev;
1305 fme->eth_group_region[dev->group_id].addr =
1307 fme->eth_group_region[dev->group_id].phys_addr =
1309 fme->eth_group_region[dev->group_id].len =
1312 fme->nums_eth_dev++;
1314 dev_info(NULL, "FME PHY Group %d Init.\n", dev->group_id);
1315 dev_info(NULL, "found %d eth group, addr %p phys_addr 0x%llx len %u\n",
1316 dev->group_id, feature->addr,
1317 (unsigned long long)feature->phys_addr,
/* Eth group teardown is handled elsewhere; placeholder hook. */
static void fme_eth_group_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);
}
1328 struct ifpga_feature_ops fme_eth_group_ops = {
1329 .init = fme_eth_group_init,
1330 .uinit = fme_eth_group_uinit,
1333 int fme_mgr_read_mac_rom(struct ifpga_fme_hw *fme, int offset,
1334 void *buf, int size)
1336 struct altera_i2c_dev *dev;
1338 dev = fme->i2c_master;
1342 return at24_eeprom_read(dev, AT24512_SLAVE_ADDR, offset, buf, size);
1345 int fme_mgr_write_mac_rom(struct ifpga_fme_hw *fme, int offset,
1346 void *buf, int size)
1348 struct altera_i2c_dev *dev;
1350 dev = fme->i2c_master;
1354 return at24_eeprom_write(dev, AT24512_SLAVE_ADDR, offset, buf, size);
1357 static struct eth_group_device *get_eth_group_dev(struct ifpga_fme_hw *fme,
1360 struct eth_group_device *dev;
1362 if (group_id > (MAX_ETH_GROUP_DEVICES - 1))
1365 dev = (struct eth_group_device *)fme->eth_dev[group_id];
1369 if (dev->status != ETH_GROUP_DEV_ATTACHED)
1375 int fme_mgr_get_eth_group_nums(struct ifpga_fme_hw *fme)
1377 return fme->nums_eth_dev;
1380 int fme_mgr_get_eth_group_info(struct ifpga_fme_hw *fme,
1381 u8 group_id, struct opae_eth_group_info *info)
1383 struct eth_group_device *dev;
1385 dev = get_eth_group_dev(fme, group_id);
1389 info->group_id = group_id;
1390 info->speed = dev->speed;
1391 info->nums_of_mac = dev->mac_num;
1392 info->nums_of_phy = dev->phy_num;
1397 int fme_mgr_eth_group_read_reg(struct ifpga_fme_hw *fme, u8 group_id,
1398 u8 type, u8 index, u16 addr, u32 *data)
1400 struct eth_group_device *dev;
1402 dev = get_eth_group_dev(fme, group_id);
1406 return eth_group_read_reg(dev, type, index, addr, data);
1409 int fme_mgr_eth_group_write_reg(struct ifpga_fme_hw *fme, u8 group_id,
1410 u8 type, u8 index, u16 addr, u32 data)
1412 struct eth_group_device *dev;
1414 dev = get_eth_group_dev(fme, group_id);
1418 return eth_group_write_reg(dev, type, index, addr, data);
1421 static int fme_get_eth_group_speed(struct ifpga_fme_hw *fme,
1424 struct eth_group_device *dev;
1426 dev = get_eth_group_dev(fme, group_id);
1433 int fme_mgr_get_retimer_info(struct ifpga_fme_hw *fme,
1434 struct opae_retimer_info *info)
1436 struct intel_max10_device *dev;
1438 dev = (struct intel_max10_device *)fme->max10_dev;
1442 info->nums_retimer = fme->board_info.nums_of_retimer;
1443 info->ports_per_retimer = fme->board_info.ports_per_retimer;
1444 info->nums_fvl = fme->board_info.nums_of_fvl;
1445 info->ports_per_fvl = fme->board_info.ports_per_fvl;
1447 /* The speed of PKVL is identical the eth group's speed */
1448 info->support_speed = fme_get_eth_group_speed(fme,
1449 LINE_SIDE_GROUP_ID);
1454 int fme_mgr_get_retimer_status(struct ifpga_fme_hw *fme,
1455 struct opae_retimer_status *status)
1457 struct intel_max10_device *dev;
1460 dev = (struct intel_max10_device *)fme->max10_dev;
1464 if (max10_sys_read(dev, PKVL_LINK_STATUS, &val)) {
1465 dev_err(dev, "%s: read pkvl status fail\n", __func__);
1469 /* The speed of PKVL is identical the eth group's speed */
1470 status->speed = fme_get_eth_group_speed(fme,
1471 LINE_SIDE_GROUP_ID);
1473 status->line_link_bitmap = val;
1475 dev_debug(dev, "get retimer status: speed:%d. line_link_bitmap:0x%x\n",
1477 status->line_link_bitmap);
1482 int fme_mgr_get_sensor_value(struct ifpga_fme_hw *fme,
1483 struct opae_sensor_info *sensor,
1484 unsigned int *value)
1486 struct intel_max10_device *dev;
1488 dev = (struct intel_max10_device *)fme->max10_dev;
1492 if (max10_sys_read(dev, sensor->value_reg, value)) {
1493 dev_err(dev, "%s: read sensor value register 0x%x fail\n",
1494 __func__, sensor->value_reg);
1498 *value *= sensor->multiplier;