1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
5 #include "ifpga_feature_dev.h"
7 #define PWR_THRESHOLD_MAX 0x7F
9 int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
11 struct feature *feature;
16 feature = get_fme_feature_by_id(fme, prop->feature_id);
18 if (feature && feature->ops && feature->ops->get_prop)
19 return feature->ops->get_prop(feature, prop);
24 int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
26 struct feature *feature;
31 feature = get_fme_feature_by_id(fme, prop->feature_id);
33 if (feature && feature->ops && feature->ops->set_prop)
34 return feature->ops->set_prop(feature, prop);
39 int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set)
41 struct feature *feature;
46 feature = get_fme_feature_by_id(fme, feature_id);
48 if (feature && feature->ops && feature->ops->set_irq)
49 return feature->ops->set_irq(feature, irq_set);
54 /* fme private feature head */
55 static int fme_hdr_init(struct feature *feature)
57 struct feature_fme_header *fme_hdr;
59 fme_hdr = (struct feature_fme_header *)feature->addr;
61 dev_info(NULL, "FME HDR Init.\n");
62 dev_info(NULL, "FME cap %llx.\n",
63 (unsigned long long)fme_hdr->capability.csr);
68 static void fme_hdr_uinit(struct feature *feature)
72 dev_info(NULL, "FME HDR UInit.\n");
75 static int fme_hdr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
77 struct feature_fme_header *fme_hdr
78 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
79 struct feature_header header;
81 header.csr = readq(&fme_hdr->header);
82 *revision = header.revision;
87 static int fme_hdr_get_ports_num(struct ifpga_fme_hw *fme, u64 *ports_num)
89 struct feature_fme_header *fme_hdr
90 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
91 struct feature_fme_capability fme_capability;
93 fme_capability.csr = readq(&fme_hdr->capability);
94 *ports_num = fme_capability.num_ports;
99 static int fme_hdr_get_cache_size(struct ifpga_fme_hw *fme, u64 *cache_size)
101 struct feature_fme_header *fme_hdr
102 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
103 struct feature_fme_capability fme_capability;
105 fme_capability.csr = readq(&fme_hdr->capability);
106 *cache_size = fme_capability.cache_size;
111 static int fme_hdr_get_version(struct ifpga_fme_hw *fme, u64 *version)
113 struct feature_fme_header *fme_hdr
114 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
115 struct feature_fme_capability fme_capability;
117 fme_capability.csr = readq(&fme_hdr->capability);
118 *version = fme_capability.fabric_verid;
123 static int fme_hdr_get_socket_id(struct ifpga_fme_hw *fme, u64 *socket_id)
125 struct feature_fme_header *fme_hdr
126 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
127 struct feature_fme_capability fme_capability;
129 fme_capability.csr = readq(&fme_hdr->capability);
130 *socket_id = fme_capability.socket_id;
135 static int fme_hdr_get_bitstream_id(struct ifpga_fme_hw *fme,
138 struct feature_fme_header *fme_hdr
139 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
141 *bitstream_id = readq(&fme_hdr->bitstream_id);
146 static int fme_hdr_get_bitstream_metadata(struct ifpga_fme_hw *fme,
147 u64 *bitstream_metadata)
149 struct feature_fme_header *fme_hdr
150 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
152 *bitstream_metadata = readq(&fme_hdr->bitstream_md);
158 fme_hdr_get_prop(struct feature *feature, struct feature_prop *prop)
160 struct ifpga_fme_hw *fme = feature->parent;
162 switch (prop->prop_id) {
163 case FME_HDR_PROP_REVISION:
164 return fme_hdr_get_revision(fme, &prop->data);
165 case FME_HDR_PROP_PORTS_NUM:
166 return fme_hdr_get_ports_num(fme, &prop->data);
167 case FME_HDR_PROP_CACHE_SIZE:
168 return fme_hdr_get_cache_size(fme, &prop->data);
169 case FME_HDR_PROP_VERSION:
170 return fme_hdr_get_version(fme, &prop->data);
171 case FME_HDR_PROP_SOCKET_ID:
172 return fme_hdr_get_socket_id(fme, &prop->data);
173 case FME_HDR_PROP_BITSTREAM_ID:
174 return fme_hdr_get_bitstream_id(fme, &prop->data);
175 case FME_HDR_PROP_BITSTREAM_METADATA:
176 return fme_hdr_get_bitstream_metadata(fme, &prop->data);
182 struct feature_ops fme_hdr_ops = {
183 .init = fme_hdr_init,
184 .uinit = fme_hdr_uinit,
185 .get_prop = fme_hdr_get_prop,
188 /* thermal management */
189 static int fme_thermal_get_threshold1(struct ifpga_fme_hw *fme, u64 *thres1)
191 struct feature_fme_thermal *thermal;
192 struct feature_fme_tmp_threshold temp_threshold;
194 thermal = get_fme_feature_ioaddr_by_index(fme,
195 FME_FEATURE_ID_THERMAL_MGMT);
197 temp_threshold.csr = readq(&thermal->threshold);
198 *thres1 = temp_threshold.tmp_thshold1;
203 static int fme_thermal_set_threshold1(struct ifpga_fme_hw *fme, u64 thres1)
205 struct feature_fme_thermal *thermal;
206 struct feature_fme_header *fme_hdr;
207 struct feature_fme_tmp_threshold tmp_threshold;
208 struct feature_fme_capability fme_capability;
210 thermal = get_fme_feature_ioaddr_by_index(fme,
211 FME_FEATURE_ID_THERMAL_MGMT);
212 fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
214 spinlock_lock(&fme->lock);
215 tmp_threshold.csr = readq(&thermal->threshold);
216 fme_capability.csr = readq(&fme_hdr->capability);
218 if (fme_capability.lock_bit == 1) {
219 spinlock_unlock(&fme->lock);
221 } else if (thres1 > 100) {
222 spinlock_unlock(&fme->lock);
224 } else if (thres1 == 0) {
225 tmp_threshold.tmp_thshold1_enable = 0;
226 tmp_threshold.tmp_thshold1 = thres1;
228 tmp_threshold.tmp_thshold1_enable = 1;
229 tmp_threshold.tmp_thshold1 = thres1;
232 writeq(tmp_threshold.csr, &thermal->threshold);
233 spinlock_unlock(&fme->lock);
238 static int fme_thermal_get_threshold2(struct ifpga_fme_hw *fme, u64 *thres2)
240 struct feature_fme_thermal *thermal;
241 struct feature_fme_tmp_threshold temp_threshold;
243 thermal = get_fme_feature_ioaddr_by_index(fme,
244 FME_FEATURE_ID_THERMAL_MGMT);
246 temp_threshold.csr = readq(&thermal->threshold);
247 *thres2 = temp_threshold.tmp_thshold2;
252 static int fme_thermal_set_threshold2(struct ifpga_fme_hw *fme, u64 thres2)
254 struct feature_fme_thermal *thermal;
255 struct feature_fme_header *fme_hdr;
256 struct feature_fme_tmp_threshold tmp_threshold;
257 struct feature_fme_capability fme_capability;
259 thermal = get_fme_feature_ioaddr_by_index(fme,
260 FME_FEATURE_ID_THERMAL_MGMT);
261 fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
263 spinlock_lock(&fme->lock);
264 tmp_threshold.csr = readq(&thermal->threshold);
265 fme_capability.csr = readq(&fme_hdr->capability);
267 if (fme_capability.lock_bit == 1) {
268 spinlock_unlock(&fme->lock);
270 } else if (thres2 > 100) {
271 spinlock_unlock(&fme->lock);
273 } else if (thres2 == 0) {
274 tmp_threshold.tmp_thshold2_enable = 0;
275 tmp_threshold.tmp_thshold2 = thres2;
277 tmp_threshold.tmp_thshold2_enable = 1;
278 tmp_threshold.tmp_thshold2 = thres2;
281 writeq(tmp_threshold.csr, &thermal->threshold);
282 spinlock_unlock(&fme->lock);
287 static int fme_thermal_get_threshold_trip(struct ifpga_fme_hw *fme,
290 struct feature_fme_thermal *thermal;
291 struct feature_fme_tmp_threshold temp_threshold;
293 thermal = get_fme_feature_ioaddr_by_index(fme,
294 FME_FEATURE_ID_THERMAL_MGMT);
296 temp_threshold.csr = readq(&thermal->threshold);
297 *thres_trip = temp_threshold.therm_trip_thshold;
302 static int fme_thermal_get_threshold1_reached(struct ifpga_fme_hw *fme,
305 struct feature_fme_thermal *thermal;
306 struct feature_fme_tmp_threshold temp_threshold;
308 thermal = get_fme_feature_ioaddr_by_index(fme,
309 FME_FEATURE_ID_THERMAL_MGMT);
311 temp_threshold.csr = readq(&thermal->threshold);
312 *thres1_reached = temp_threshold.thshold1_status;
317 static int fme_thermal_get_threshold2_reached(struct ifpga_fme_hw *fme,
320 struct feature_fme_thermal *thermal;
321 struct feature_fme_tmp_threshold temp_threshold;
323 thermal = get_fme_feature_ioaddr_by_index(fme,
324 FME_FEATURE_ID_THERMAL_MGMT);
326 temp_threshold.csr = readq(&thermal->threshold);
327 *thres1_reached = temp_threshold.thshold2_status;
332 static int fme_thermal_get_threshold1_policy(struct ifpga_fme_hw *fme,
335 struct feature_fme_thermal *thermal;
336 struct feature_fme_tmp_threshold temp_threshold;
338 thermal = get_fme_feature_ioaddr_by_index(fme,
339 FME_FEATURE_ID_THERMAL_MGMT);
341 temp_threshold.csr = readq(&thermal->threshold);
342 *thres1_policy = temp_threshold.thshold_policy;
347 static int fme_thermal_set_threshold1_policy(struct ifpga_fme_hw *fme,
350 struct feature_fme_thermal *thermal;
351 struct feature_fme_tmp_threshold tmp_threshold;
353 thermal = get_fme_feature_ioaddr_by_index(fme,
354 FME_FEATURE_ID_THERMAL_MGMT);
356 spinlock_lock(&fme->lock);
357 tmp_threshold.csr = readq(&thermal->threshold);
359 if (thres1_policy == 0) {
360 tmp_threshold.thshold_policy = 0;
361 } else if (thres1_policy == 1) {
362 tmp_threshold.thshold_policy = 1;
364 spinlock_unlock(&fme->lock);
368 writeq(tmp_threshold.csr, &thermal->threshold);
369 spinlock_unlock(&fme->lock);
374 static int fme_thermal_get_temperature(struct ifpga_fme_hw *fme, u64 *temp)
376 struct feature_fme_thermal *thermal;
377 struct feature_fme_temp_rdsensor_fmt1 temp_rdsensor_fmt1;
379 thermal = get_fme_feature_ioaddr_by_index(fme,
380 FME_FEATURE_ID_THERMAL_MGMT);
382 temp_rdsensor_fmt1.csr = readq(&thermal->rdsensor_fm1);
383 *temp = temp_rdsensor_fmt1.fpga_temp;
388 static int fme_thermal_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
390 struct feature_fme_thermal *fme_thermal
391 = get_fme_feature_ioaddr_by_index(fme,
392 FME_FEATURE_ID_THERMAL_MGMT);
393 struct feature_header header;
395 header.csr = readq(&fme_thermal->header);
396 *revision = header.revision;
401 #define FME_THERMAL_CAP_NO_TMP_THRESHOLD 0x1
403 static int fme_thermal_mgmt_init(struct feature *feature)
405 struct feature_fme_thermal *fme_thermal;
406 struct feature_fme_tmp_threshold_cap thermal_cap;
410 dev_info(NULL, "FME thermal mgmt Init.\n");
412 fme_thermal = (struct feature_fme_thermal *)feature->addr;
413 thermal_cap.csr = readq(&fme_thermal->threshold_cap);
415 dev_info(NULL, "FME thermal cap %llx.\n",
416 (unsigned long long)fme_thermal->threshold_cap.csr);
418 if (thermal_cap.tmp_thshold_disabled)
419 feature->cap |= FME_THERMAL_CAP_NO_TMP_THRESHOLD;
424 static void fme_thermal_mgmt_uinit(struct feature *feature)
428 dev_info(NULL, "FME thermal mgmt UInit.\n");
432 fme_thermal_set_prop(struct feature *feature, struct feature_prop *prop)
434 struct ifpga_fme_hw *fme = feature->parent;
436 if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD)
439 switch (prop->prop_id) {
440 case FME_THERMAL_PROP_THRESHOLD1:
441 return fme_thermal_set_threshold1(fme, prop->data);
442 case FME_THERMAL_PROP_THRESHOLD2:
443 return fme_thermal_set_threshold2(fme, prop->data);
444 case FME_THERMAL_PROP_THRESHOLD1_POLICY:
445 return fme_thermal_set_threshold1_policy(fme, prop->data);
452 fme_thermal_get_prop(struct feature *feature, struct feature_prop *prop)
454 struct ifpga_fme_hw *fme = feature->parent;
456 if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD &&
457 prop->prop_id != FME_THERMAL_PROP_TEMPERATURE &&
458 prop->prop_id != FME_THERMAL_PROP_REVISION)
461 switch (prop->prop_id) {
462 case FME_THERMAL_PROP_THRESHOLD1:
463 return fme_thermal_get_threshold1(fme, &prop->data);
464 case FME_THERMAL_PROP_THRESHOLD2:
465 return fme_thermal_get_threshold2(fme, &prop->data);
466 case FME_THERMAL_PROP_THRESHOLD_TRIP:
467 return fme_thermal_get_threshold_trip(fme, &prop->data);
468 case FME_THERMAL_PROP_THRESHOLD1_REACHED:
469 return fme_thermal_get_threshold1_reached(fme, &prop->data);
470 case FME_THERMAL_PROP_THRESHOLD2_REACHED:
471 return fme_thermal_get_threshold2_reached(fme, &prop->data);
472 case FME_THERMAL_PROP_THRESHOLD1_POLICY:
473 return fme_thermal_get_threshold1_policy(fme, &prop->data);
474 case FME_THERMAL_PROP_TEMPERATURE:
475 return fme_thermal_get_temperature(fme, &prop->data);
476 case FME_THERMAL_PROP_REVISION:
477 return fme_thermal_get_revision(fme, &prop->data);
483 struct feature_ops fme_thermal_mgmt_ops = {
484 .init = fme_thermal_mgmt_init,
485 .uinit = fme_thermal_mgmt_uinit,
486 .get_prop = fme_thermal_get_prop,
487 .set_prop = fme_thermal_set_prop,
490 static int fme_pwr_get_consumed(struct ifpga_fme_hw *fme, u64 *consumed)
492 struct feature_fme_power *fme_power
493 = get_fme_feature_ioaddr_by_index(fme,
494 FME_FEATURE_ID_POWER_MGMT);
495 struct feature_fme_pm_status pm_status;
497 pm_status.csr = readq(&fme_power->status);
499 *consumed = pm_status.pwr_consumed;
504 static int fme_pwr_get_threshold1(struct ifpga_fme_hw *fme, u64 *threshold)
506 struct feature_fme_power *fme_power
507 = get_fme_feature_ioaddr_by_index(fme,
508 FME_FEATURE_ID_POWER_MGMT);
509 struct feature_fme_pm_ap_threshold pm_ap_threshold;
511 pm_ap_threshold.csr = readq(&fme_power->threshold);
513 *threshold = pm_ap_threshold.threshold1;
518 static int fme_pwr_set_threshold1(struct ifpga_fme_hw *fme, u64 threshold)
520 struct feature_fme_power *fme_power
521 = get_fme_feature_ioaddr_by_index(fme,
522 FME_FEATURE_ID_POWER_MGMT);
523 struct feature_fme_pm_ap_threshold pm_ap_threshold;
525 spinlock_lock(&fme->lock);
526 pm_ap_threshold.csr = readq(&fme_power->threshold);
528 if (threshold <= PWR_THRESHOLD_MAX) {
529 pm_ap_threshold.threshold1 = threshold;
531 spinlock_unlock(&fme->lock);
535 writeq(pm_ap_threshold.csr, &fme_power->threshold);
536 spinlock_unlock(&fme->lock);
541 static int fme_pwr_get_threshold2(struct ifpga_fme_hw *fme, u64 *threshold)
543 struct feature_fme_power *fme_power
544 = get_fme_feature_ioaddr_by_index(fme,
545 FME_FEATURE_ID_POWER_MGMT);
546 struct feature_fme_pm_ap_threshold pm_ap_threshold;
548 pm_ap_threshold.csr = readq(&fme_power->threshold);
550 *threshold = pm_ap_threshold.threshold2;
555 static int fme_pwr_set_threshold2(struct ifpga_fme_hw *fme, u64 threshold)
557 struct feature_fme_power *fme_power
558 = get_fme_feature_ioaddr_by_index(fme,
559 FME_FEATURE_ID_POWER_MGMT);
560 struct feature_fme_pm_ap_threshold pm_ap_threshold;
562 spinlock_lock(&fme->lock);
563 pm_ap_threshold.csr = readq(&fme_power->threshold);
565 if (threshold <= PWR_THRESHOLD_MAX) {
566 pm_ap_threshold.threshold2 = threshold;
568 spinlock_unlock(&fme->lock);
572 writeq(pm_ap_threshold.csr, &fme_power->threshold);
573 spinlock_unlock(&fme->lock);
578 static int fme_pwr_get_threshold1_status(struct ifpga_fme_hw *fme,
579 u64 *threshold_status)
581 struct feature_fme_power *fme_power
582 = get_fme_feature_ioaddr_by_index(fme,
583 FME_FEATURE_ID_POWER_MGMT);
584 struct feature_fme_pm_ap_threshold pm_ap_threshold;
586 pm_ap_threshold.csr = readq(&fme_power->threshold);
588 *threshold_status = pm_ap_threshold.threshold1_status;
593 static int fme_pwr_get_threshold2_status(struct ifpga_fme_hw *fme,
594 u64 *threshold_status)
596 struct feature_fme_power *fme_power
597 = get_fme_feature_ioaddr_by_index(fme,
598 FME_FEATURE_ID_POWER_MGMT);
599 struct feature_fme_pm_ap_threshold pm_ap_threshold;
601 pm_ap_threshold.csr = readq(&fme_power->threshold);
603 *threshold_status = pm_ap_threshold.threshold2_status;
608 static int fme_pwr_get_rtl(struct ifpga_fme_hw *fme, u64 *rtl)
610 struct feature_fme_power *fme_power
611 = get_fme_feature_ioaddr_by_index(fme,
612 FME_FEATURE_ID_POWER_MGMT);
613 struct feature_fme_pm_status pm_status;
615 pm_status.csr = readq(&fme_power->status);
617 *rtl = pm_status.fpga_latency_report;
622 static int fme_pwr_get_xeon_limit(struct ifpga_fme_hw *fme, u64 *limit)
624 struct feature_fme_power *fme_power
625 = get_fme_feature_ioaddr_by_index(fme,
626 FME_FEATURE_ID_POWER_MGMT);
627 struct feature_fme_pm_xeon_limit xeon_limit;
629 xeon_limit.csr = readq(&fme_power->xeon_limit);
631 if (!xeon_limit.enable)
632 xeon_limit.pwr_limit = 0;
634 *limit = xeon_limit.pwr_limit;
639 static int fme_pwr_get_fpga_limit(struct ifpga_fme_hw *fme, u64 *limit)
641 struct feature_fme_power *fme_power
642 = get_fme_feature_ioaddr_by_index(fme,
643 FME_FEATURE_ID_POWER_MGMT);
644 struct feature_fme_pm_fpga_limit fpga_limit;
646 fpga_limit.csr = readq(&fme_power->fpga_limit);
648 if (!fpga_limit.enable)
649 fpga_limit.pwr_limit = 0;
651 *limit = fpga_limit.pwr_limit;
656 static int fme_pwr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
658 struct feature_fme_power *fme_power
659 = get_fme_feature_ioaddr_by_index(fme,
660 FME_FEATURE_ID_POWER_MGMT);
661 struct feature_header header;
663 header.csr = readq(&fme_power->header);
664 *revision = header.revision;
669 static int fme_power_mgmt_init(struct feature *feature)
673 dev_info(NULL, "FME power mgmt Init.\n");
678 static void fme_power_mgmt_uinit(struct feature *feature)
682 dev_info(NULL, "FME power mgmt UInit.\n");
685 static int fme_power_mgmt_get_prop(struct feature *feature,
686 struct feature_prop *prop)
688 struct ifpga_fme_hw *fme = feature->parent;
690 switch (prop->prop_id) {
691 case FME_PWR_PROP_CONSUMED:
692 return fme_pwr_get_consumed(fme, &prop->data);
693 case FME_PWR_PROP_THRESHOLD1:
694 return fme_pwr_get_threshold1(fme, &prop->data);
695 case FME_PWR_PROP_THRESHOLD2:
696 return fme_pwr_get_threshold2(fme, &prop->data);
697 case FME_PWR_PROP_THRESHOLD1_STATUS:
698 return fme_pwr_get_threshold1_status(fme, &prop->data);
699 case FME_PWR_PROP_THRESHOLD2_STATUS:
700 return fme_pwr_get_threshold2_status(fme, &prop->data);
701 case FME_PWR_PROP_RTL:
702 return fme_pwr_get_rtl(fme, &prop->data);
703 case FME_PWR_PROP_XEON_LIMIT:
704 return fme_pwr_get_xeon_limit(fme, &prop->data);
705 case FME_PWR_PROP_FPGA_LIMIT:
706 return fme_pwr_get_fpga_limit(fme, &prop->data);
707 case FME_PWR_PROP_REVISION:
708 return fme_pwr_get_revision(fme, &prop->data);
714 static int fme_power_mgmt_set_prop(struct feature *feature,
715 struct feature_prop *prop)
717 struct ifpga_fme_hw *fme = feature->parent;
719 switch (prop->prop_id) {
720 case FME_PWR_PROP_THRESHOLD1:
721 return fme_pwr_set_threshold1(fme, prop->data);
722 case FME_PWR_PROP_THRESHOLD2:
723 return fme_pwr_set_threshold2(fme, prop->data);
729 struct feature_ops fme_power_mgmt_ops = {
730 .init = fme_power_mgmt_init,
731 .uinit = fme_power_mgmt_uinit,
732 .get_prop = fme_power_mgmt_get_prop,
733 .set_prop = fme_power_mgmt_set_prop,
/* Init handler for the HSSI Ethernet feature: placeholder, no setup. */
static int fme_hssi_eth_init(struct feature *feature)
{
	UNUSED(feature);
	return 0;
}
/* Uninit handler for the HSSI Ethernet feature: placeholder, no teardown. */
static void fme_hssi_eth_uinit(struct feature *feature)
{
	UNUSED(feature);
}
747 struct feature_ops fme_hssi_eth_ops = {
748 .init = fme_hssi_eth_init,
749 .uinit = fme_hssi_eth_uinit,
/* Init handler for the external memory interface feature: placeholder. */
static int fme_emif_init(struct feature *feature)
{
	UNUSED(feature);
	return 0;
}
/* Uninit handler for the external memory interface feature: placeholder. */
static void fme_emif_uinit(struct feature *feature)
{
	UNUSED(feature);
}
763 struct feature_ops fme_emif_ops = {
764 .init = fme_emif_init,
765 .uinit = fme_emif_uinit,