1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
5 #include "ifpga_feature_dev.h"
7 #include "opae_intel_max10.h"
9 #include "opae_at24_eeprom.h"
10 #include "ifpga_sec_mgr.h"
12 #define PWR_THRESHOLD_MAX 0x7F
14 int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
16 struct ifpga_feature *feature;
21 feature = get_fme_feature_by_id(fme, prop->feature_id);
23 if (feature && feature->ops && feature->ops->get_prop)
24 return feature->ops->get_prop(feature, prop);
29 int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
31 struct ifpga_feature *feature;
36 feature = get_fme_feature_by_id(fme, prop->feature_id);
38 if (feature && feature->ops && feature->ops->set_prop)
39 return feature->ops->set_prop(feature, prop);
44 int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set)
46 struct ifpga_feature *feature;
51 feature = get_fme_feature_by_id(fme, feature_id);
53 if (feature && feature->ops && feature->ops->set_irq)
54 return feature->ops->set_irq(feature, irq_set);
59 /* fme private feature head */
60 static int fme_hdr_init(struct ifpga_feature *feature)
62 struct feature_fme_header *fme_hdr;
64 fme_hdr = (struct feature_fme_header *)feature->addr;
66 dev_info(NULL, "FME HDR Init.\n");
67 dev_info(NULL, "FME cap %llx.\n",
68 (unsigned long long)fme_hdr->capability.csr);
73 static void fme_hdr_uinit(struct ifpga_feature *feature)
77 dev_info(NULL, "FME HDR UInit.\n");
80 static int fme_hdr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
82 struct feature_fme_header *fme_hdr
83 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
84 struct feature_header header;
86 header.csr = readq(&fme_hdr->header);
87 *revision = header.revision;
92 static int fme_hdr_get_ports_num(struct ifpga_fme_hw *fme, u64 *ports_num)
94 struct feature_fme_header *fme_hdr
95 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
96 struct feature_fme_capability fme_capability;
98 fme_capability.csr = readq(&fme_hdr->capability);
99 *ports_num = fme_capability.num_ports;
104 static int fme_hdr_get_cache_size(struct ifpga_fme_hw *fme, u64 *cache_size)
106 struct feature_fme_header *fme_hdr
107 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
108 struct feature_fme_capability fme_capability;
110 fme_capability.csr = readq(&fme_hdr->capability);
111 *cache_size = fme_capability.cache_size;
116 static int fme_hdr_get_version(struct ifpga_fme_hw *fme, u64 *version)
118 struct feature_fme_header *fme_hdr
119 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
120 struct feature_fme_capability fme_capability;
122 fme_capability.csr = readq(&fme_hdr->capability);
123 *version = fme_capability.fabric_verid;
128 static int fme_hdr_get_socket_id(struct ifpga_fme_hw *fme, u64 *socket_id)
130 struct feature_fme_header *fme_hdr
131 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
132 struct feature_fme_capability fme_capability;
134 fme_capability.csr = readq(&fme_hdr->capability);
135 *socket_id = fme_capability.socket_id;
140 static int fme_hdr_get_bitstream_id(struct ifpga_fme_hw *fme,
143 struct feature_fme_header *fme_hdr
144 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
146 *bitstream_id = readq(&fme_hdr->bitstream_id);
151 static int fme_hdr_get_bitstream_metadata(struct ifpga_fme_hw *fme,
152 u64 *bitstream_metadata)
154 struct feature_fme_header *fme_hdr
155 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
157 *bitstream_metadata = readq(&fme_hdr->bitstream_md);
163 fme_hdr_get_prop(struct ifpga_feature *feature, struct feature_prop *prop)
165 struct ifpga_fme_hw *fme = feature->parent;
167 switch (prop->prop_id) {
168 case FME_HDR_PROP_REVISION:
169 return fme_hdr_get_revision(fme, &prop->data);
170 case FME_HDR_PROP_PORTS_NUM:
171 return fme_hdr_get_ports_num(fme, &prop->data);
172 case FME_HDR_PROP_CACHE_SIZE:
173 return fme_hdr_get_cache_size(fme, &prop->data);
174 case FME_HDR_PROP_VERSION:
175 return fme_hdr_get_version(fme, &prop->data);
176 case FME_HDR_PROP_SOCKET_ID:
177 return fme_hdr_get_socket_id(fme, &prop->data);
178 case FME_HDR_PROP_BITSTREAM_ID:
179 return fme_hdr_get_bitstream_id(fme, &prop->data);
180 case FME_HDR_PROP_BITSTREAM_METADATA:
181 return fme_hdr_get_bitstream_metadata(fme, &prop->data);
187 struct ifpga_feature_ops fme_hdr_ops = {
188 .init = fme_hdr_init,
189 .uinit = fme_hdr_uinit,
190 .get_prop = fme_hdr_get_prop,
193 /* thermal management */
194 static int fme_thermal_get_threshold1(struct ifpga_fme_hw *fme, u64 *thres1)
196 struct feature_fme_thermal *thermal;
197 struct feature_fme_tmp_threshold temp_threshold;
199 thermal = get_fme_feature_ioaddr_by_index(fme,
200 FME_FEATURE_ID_THERMAL_MGMT);
202 temp_threshold.csr = readq(&thermal->threshold);
203 *thres1 = temp_threshold.tmp_thshold1;
208 static int fme_thermal_set_threshold1(struct ifpga_fme_hw *fme, u64 thres1)
210 struct feature_fme_thermal *thermal;
211 struct feature_fme_header *fme_hdr;
212 struct feature_fme_tmp_threshold tmp_threshold;
213 struct feature_fme_capability fme_capability;
215 thermal = get_fme_feature_ioaddr_by_index(fme,
216 FME_FEATURE_ID_THERMAL_MGMT);
217 fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
219 spinlock_lock(&fme->lock);
220 tmp_threshold.csr = readq(&thermal->threshold);
221 fme_capability.csr = readq(&fme_hdr->capability);
223 if (fme_capability.lock_bit == 1) {
224 spinlock_unlock(&fme->lock);
226 } else if (thres1 > 100) {
227 spinlock_unlock(&fme->lock);
229 } else if (thres1 == 0) {
230 tmp_threshold.tmp_thshold1_enable = 0;
231 tmp_threshold.tmp_thshold1 = thres1;
233 tmp_threshold.tmp_thshold1_enable = 1;
234 tmp_threshold.tmp_thshold1 = thres1;
237 writeq(tmp_threshold.csr, &thermal->threshold);
238 spinlock_unlock(&fme->lock);
243 static int fme_thermal_get_threshold2(struct ifpga_fme_hw *fme, u64 *thres2)
245 struct feature_fme_thermal *thermal;
246 struct feature_fme_tmp_threshold temp_threshold;
248 thermal = get_fme_feature_ioaddr_by_index(fme,
249 FME_FEATURE_ID_THERMAL_MGMT);
251 temp_threshold.csr = readq(&thermal->threshold);
252 *thres2 = temp_threshold.tmp_thshold2;
257 static int fme_thermal_set_threshold2(struct ifpga_fme_hw *fme, u64 thres2)
259 struct feature_fme_thermal *thermal;
260 struct feature_fme_header *fme_hdr;
261 struct feature_fme_tmp_threshold tmp_threshold;
262 struct feature_fme_capability fme_capability;
264 thermal = get_fme_feature_ioaddr_by_index(fme,
265 FME_FEATURE_ID_THERMAL_MGMT);
266 fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
268 spinlock_lock(&fme->lock);
269 tmp_threshold.csr = readq(&thermal->threshold);
270 fme_capability.csr = readq(&fme_hdr->capability);
272 if (fme_capability.lock_bit == 1) {
273 spinlock_unlock(&fme->lock);
275 } else if (thres2 > 100) {
276 spinlock_unlock(&fme->lock);
278 } else if (thres2 == 0) {
279 tmp_threshold.tmp_thshold2_enable = 0;
280 tmp_threshold.tmp_thshold2 = thres2;
282 tmp_threshold.tmp_thshold2_enable = 1;
283 tmp_threshold.tmp_thshold2 = thres2;
286 writeq(tmp_threshold.csr, &thermal->threshold);
287 spinlock_unlock(&fme->lock);
292 static int fme_thermal_get_threshold_trip(struct ifpga_fme_hw *fme,
295 struct feature_fme_thermal *thermal;
296 struct feature_fme_tmp_threshold temp_threshold;
298 thermal = get_fme_feature_ioaddr_by_index(fme,
299 FME_FEATURE_ID_THERMAL_MGMT);
301 temp_threshold.csr = readq(&thermal->threshold);
302 *thres_trip = temp_threshold.therm_trip_thshold;
307 static int fme_thermal_get_threshold1_reached(struct ifpga_fme_hw *fme,
310 struct feature_fme_thermal *thermal;
311 struct feature_fme_tmp_threshold temp_threshold;
313 thermal = get_fme_feature_ioaddr_by_index(fme,
314 FME_FEATURE_ID_THERMAL_MGMT);
316 temp_threshold.csr = readq(&thermal->threshold);
317 *thres1_reached = temp_threshold.thshold1_status;
322 static int fme_thermal_get_threshold2_reached(struct ifpga_fme_hw *fme,
325 struct feature_fme_thermal *thermal;
326 struct feature_fme_tmp_threshold temp_threshold;
328 thermal = get_fme_feature_ioaddr_by_index(fme,
329 FME_FEATURE_ID_THERMAL_MGMT);
331 temp_threshold.csr = readq(&thermal->threshold);
332 *thres1_reached = temp_threshold.thshold2_status;
337 static int fme_thermal_get_threshold1_policy(struct ifpga_fme_hw *fme,
340 struct feature_fme_thermal *thermal;
341 struct feature_fme_tmp_threshold temp_threshold;
343 thermal = get_fme_feature_ioaddr_by_index(fme,
344 FME_FEATURE_ID_THERMAL_MGMT);
346 temp_threshold.csr = readq(&thermal->threshold);
347 *thres1_policy = temp_threshold.thshold_policy;
352 static int fme_thermal_set_threshold1_policy(struct ifpga_fme_hw *fme,
355 struct feature_fme_thermal *thermal;
356 struct feature_fme_tmp_threshold tmp_threshold;
358 thermal = get_fme_feature_ioaddr_by_index(fme,
359 FME_FEATURE_ID_THERMAL_MGMT);
361 spinlock_lock(&fme->lock);
362 tmp_threshold.csr = readq(&thermal->threshold);
364 if (thres1_policy == 0) {
365 tmp_threshold.thshold_policy = 0;
366 } else if (thres1_policy == 1) {
367 tmp_threshold.thshold_policy = 1;
369 spinlock_unlock(&fme->lock);
373 writeq(tmp_threshold.csr, &thermal->threshold);
374 spinlock_unlock(&fme->lock);
379 static int fme_thermal_get_temperature(struct ifpga_fme_hw *fme, u64 *temp)
381 struct feature_fme_thermal *thermal;
382 struct feature_fme_temp_rdsensor_fmt1 temp_rdsensor_fmt1;
384 thermal = get_fme_feature_ioaddr_by_index(fme,
385 FME_FEATURE_ID_THERMAL_MGMT);
387 temp_rdsensor_fmt1.csr = readq(&thermal->rdsensor_fm1);
388 *temp = temp_rdsensor_fmt1.fpga_temp;
393 static int fme_thermal_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
395 struct feature_fme_thermal *fme_thermal
396 = get_fme_feature_ioaddr_by_index(fme,
397 FME_FEATURE_ID_THERMAL_MGMT);
398 struct feature_header header;
400 header.csr = readq(&fme_thermal->header);
401 *revision = header.revision;
406 #define FME_THERMAL_CAP_NO_TMP_THRESHOLD 0x1
408 static int fme_thermal_mgmt_init(struct ifpga_feature *feature)
410 struct feature_fme_thermal *fme_thermal;
411 struct feature_fme_tmp_threshold_cap thermal_cap;
415 dev_info(NULL, "FME thermal mgmt Init.\n");
417 fme_thermal = (struct feature_fme_thermal *)feature->addr;
418 thermal_cap.csr = readq(&fme_thermal->threshold_cap);
420 dev_info(NULL, "FME thermal cap %llx.\n",
421 (unsigned long long)fme_thermal->threshold_cap.csr);
423 if (thermal_cap.tmp_thshold_disabled)
424 feature->cap |= FME_THERMAL_CAP_NO_TMP_THRESHOLD;
429 static void fme_thermal_mgmt_uinit(struct ifpga_feature *feature)
433 dev_info(NULL, "FME thermal mgmt UInit.\n");
437 fme_thermal_set_prop(struct ifpga_feature *feature, struct feature_prop *prop)
439 struct ifpga_fme_hw *fme = feature->parent;
441 if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD)
444 switch (prop->prop_id) {
445 case FME_THERMAL_PROP_THRESHOLD1:
446 return fme_thermal_set_threshold1(fme, prop->data);
447 case FME_THERMAL_PROP_THRESHOLD2:
448 return fme_thermal_set_threshold2(fme, prop->data);
449 case FME_THERMAL_PROP_THRESHOLD1_POLICY:
450 return fme_thermal_set_threshold1_policy(fme, prop->data);
457 fme_thermal_get_prop(struct ifpga_feature *feature, struct feature_prop *prop)
459 struct ifpga_fme_hw *fme = feature->parent;
461 if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD &&
462 prop->prop_id != FME_THERMAL_PROP_TEMPERATURE &&
463 prop->prop_id != FME_THERMAL_PROP_REVISION)
466 switch (prop->prop_id) {
467 case FME_THERMAL_PROP_THRESHOLD1:
468 return fme_thermal_get_threshold1(fme, &prop->data);
469 case FME_THERMAL_PROP_THRESHOLD2:
470 return fme_thermal_get_threshold2(fme, &prop->data);
471 case FME_THERMAL_PROP_THRESHOLD_TRIP:
472 return fme_thermal_get_threshold_trip(fme, &prop->data);
473 case FME_THERMAL_PROP_THRESHOLD1_REACHED:
474 return fme_thermal_get_threshold1_reached(fme, &prop->data);
475 case FME_THERMAL_PROP_THRESHOLD2_REACHED:
476 return fme_thermal_get_threshold2_reached(fme, &prop->data);
477 case FME_THERMAL_PROP_THRESHOLD1_POLICY:
478 return fme_thermal_get_threshold1_policy(fme, &prop->data);
479 case FME_THERMAL_PROP_TEMPERATURE:
480 return fme_thermal_get_temperature(fme, &prop->data);
481 case FME_THERMAL_PROP_REVISION:
482 return fme_thermal_get_revision(fme, &prop->data);
488 struct ifpga_feature_ops fme_thermal_mgmt_ops = {
489 .init = fme_thermal_mgmt_init,
490 .uinit = fme_thermal_mgmt_uinit,
491 .get_prop = fme_thermal_get_prop,
492 .set_prop = fme_thermal_set_prop,
495 static int fme_pwr_get_consumed(struct ifpga_fme_hw *fme, u64 *consumed)
497 struct feature_fme_power *fme_power
498 = get_fme_feature_ioaddr_by_index(fme,
499 FME_FEATURE_ID_POWER_MGMT);
500 struct feature_fme_pm_status pm_status;
502 pm_status.csr = readq(&fme_power->status);
504 *consumed = pm_status.pwr_consumed;
509 static int fme_pwr_get_threshold1(struct ifpga_fme_hw *fme, u64 *threshold)
511 struct feature_fme_power *fme_power
512 = get_fme_feature_ioaddr_by_index(fme,
513 FME_FEATURE_ID_POWER_MGMT);
514 struct feature_fme_pm_ap_threshold pm_ap_threshold;
516 pm_ap_threshold.csr = readq(&fme_power->threshold);
518 *threshold = pm_ap_threshold.threshold1;
523 static int fme_pwr_set_threshold1(struct ifpga_fme_hw *fme, u64 threshold)
525 struct feature_fme_power *fme_power
526 = get_fme_feature_ioaddr_by_index(fme,
527 FME_FEATURE_ID_POWER_MGMT);
528 struct feature_fme_pm_ap_threshold pm_ap_threshold;
530 spinlock_lock(&fme->lock);
531 pm_ap_threshold.csr = readq(&fme_power->threshold);
533 if (threshold <= PWR_THRESHOLD_MAX) {
534 pm_ap_threshold.threshold1 = threshold;
536 spinlock_unlock(&fme->lock);
540 writeq(pm_ap_threshold.csr, &fme_power->threshold);
541 spinlock_unlock(&fme->lock);
546 static int fme_pwr_get_threshold2(struct ifpga_fme_hw *fme, u64 *threshold)
548 struct feature_fme_power *fme_power
549 = get_fme_feature_ioaddr_by_index(fme,
550 FME_FEATURE_ID_POWER_MGMT);
551 struct feature_fme_pm_ap_threshold pm_ap_threshold;
553 pm_ap_threshold.csr = readq(&fme_power->threshold);
555 *threshold = pm_ap_threshold.threshold2;
560 static int fme_pwr_set_threshold2(struct ifpga_fme_hw *fme, u64 threshold)
562 struct feature_fme_power *fme_power
563 = get_fme_feature_ioaddr_by_index(fme,
564 FME_FEATURE_ID_POWER_MGMT);
565 struct feature_fme_pm_ap_threshold pm_ap_threshold;
567 spinlock_lock(&fme->lock);
568 pm_ap_threshold.csr = readq(&fme_power->threshold);
570 if (threshold <= PWR_THRESHOLD_MAX) {
571 pm_ap_threshold.threshold2 = threshold;
573 spinlock_unlock(&fme->lock);
577 writeq(pm_ap_threshold.csr, &fme_power->threshold);
578 spinlock_unlock(&fme->lock);
583 static int fme_pwr_get_threshold1_status(struct ifpga_fme_hw *fme,
584 u64 *threshold_status)
586 struct feature_fme_power *fme_power
587 = get_fme_feature_ioaddr_by_index(fme,
588 FME_FEATURE_ID_POWER_MGMT);
589 struct feature_fme_pm_ap_threshold pm_ap_threshold;
591 pm_ap_threshold.csr = readq(&fme_power->threshold);
593 *threshold_status = pm_ap_threshold.threshold1_status;
598 static int fme_pwr_get_threshold2_status(struct ifpga_fme_hw *fme,
599 u64 *threshold_status)
601 struct feature_fme_power *fme_power
602 = get_fme_feature_ioaddr_by_index(fme,
603 FME_FEATURE_ID_POWER_MGMT);
604 struct feature_fme_pm_ap_threshold pm_ap_threshold;
606 pm_ap_threshold.csr = readq(&fme_power->threshold);
608 *threshold_status = pm_ap_threshold.threshold2_status;
613 static int fme_pwr_get_rtl(struct ifpga_fme_hw *fme, u64 *rtl)
615 struct feature_fme_power *fme_power
616 = get_fme_feature_ioaddr_by_index(fme,
617 FME_FEATURE_ID_POWER_MGMT);
618 struct feature_fme_pm_status pm_status;
620 pm_status.csr = readq(&fme_power->status);
622 *rtl = pm_status.fpga_latency_report;
627 static int fme_pwr_get_xeon_limit(struct ifpga_fme_hw *fme, u64 *limit)
629 struct feature_fme_power *fme_power
630 = get_fme_feature_ioaddr_by_index(fme,
631 FME_FEATURE_ID_POWER_MGMT);
632 struct feature_fme_pm_xeon_limit xeon_limit;
634 xeon_limit.csr = readq(&fme_power->xeon_limit);
636 if (!xeon_limit.enable)
637 xeon_limit.pwr_limit = 0;
639 *limit = xeon_limit.pwr_limit;
644 static int fme_pwr_get_fpga_limit(struct ifpga_fme_hw *fme, u64 *limit)
646 struct feature_fme_power *fme_power
647 = get_fme_feature_ioaddr_by_index(fme,
648 FME_FEATURE_ID_POWER_MGMT);
649 struct feature_fme_pm_fpga_limit fpga_limit;
651 fpga_limit.csr = readq(&fme_power->fpga_limit);
653 if (!fpga_limit.enable)
654 fpga_limit.pwr_limit = 0;
656 *limit = fpga_limit.pwr_limit;
661 static int fme_pwr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
663 struct feature_fme_power *fme_power
664 = get_fme_feature_ioaddr_by_index(fme,
665 FME_FEATURE_ID_POWER_MGMT);
666 struct feature_header header;
668 header.csr = readq(&fme_power->header);
669 *revision = header.revision;
674 static int fme_power_mgmt_init(struct ifpga_feature *feature)
678 dev_info(NULL, "FME power mgmt Init.\n");
683 static void fme_power_mgmt_uinit(struct ifpga_feature *feature)
687 dev_info(NULL, "FME power mgmt UInit.\n");
690 static int fme_power_mgmt_get_prop(struct ifpga_feature *feature,
691 struct feature_prop *prop)
693 struct ifpga_fme_hw *fme = feature->parent;
695 switch (prop->prop_id) {
696 case FME_PWR_PROP_CONSUMED:
697 return fme_pwr_get_consumed(fme, &prop->data);
698 case FME_PWR_PROP_THRESHOLD1:
699 return fme_pwr_get_threshold1(fme, &prop->data);
700 case FME_PWR_PROP_THRESHOLD2:
701 return fme_pwr_get_threshold2(fme, &prop->data);
702 case FME_PWR_PROP_THRESHOLD1_STATUS:
703 return fme_pwr_get_threshold1_status(fme, &prop->data);
704 case FME_PWR_PROP_THRESHOLD2_STATUS:
705 return fme_pwr_get_threshold2_status(fme, &prop->data);
706 case FME_PWR_PROP_RTL:
707 return fme_pwr_get_rtl(fme, &prop->data);
708 case FME_PWR_PROP_XEON_LIMIT:
709 return fme_pwr_get_xeon_limit(fme, &prop->data);
710 case FME_PWR_PROP_FPGA_LIMIT:
711 return fme_pwr_get_fpga_limit(fme, &prop->data);
712 case FME_PWR_PROP_REVISION:
713 return fme_pwr_get_revision(fme, &prop->data);
719 static int fme_power_mgmt_set_prop(struct ifpga_feature *feature,
720 struct feature_prop *prop)
722 struct ifpga_fme_hw *fme = feature->parent;
724 switch (prop->prop_id) {
725 case FME_PWR_PROP_THRESHOLD1:
726 return fme_pwr_set_threshold1(fme, prop->data);
727 case FME_PWR_PROP_THRESHOLD2:
728 return fme_pwr_set_threshold2(fme, prop->data);
734 struct ifpga_feature_ops fme_power_mgmt_ops = {
735 .init = fme_power_mgmt_init,
736 .uinit = fme_power_mgmt_uinit,
737 .get_prop = fme_power_mgmt_get_prop,
738 .set_prop = fme_power_mgmt_set_prop,
741 static int fme_hssi_eth_init(struct ifpga_feature *feature)
747 static void fme_hssi_eth_uinit(struct ifpga_feature *feature)
752 struct ifpga_feature_ops fme_hssi_eth_ops = {
753 .init = fme_hssi_eth_init,
754 .uinit = fme_hssi_eth_uinit,
757 static int fme_emif_init(struct ifpga_feature *feature)
763 static void fme_emif_uinit(struct ifpga_feature *feature)
768 struct ifpga_feature_ops fme_emif_ops = {
769 .init = fme_emif_init,
770 .uinit = fme_emif_uinit,
773 static const char *board_type_to_string(u32 type)
783 return "VC_4x25G+2x25G";
791 static const char *board_major_to_string(u32 major)
795 return "VISTA_CREEK";
799 return "DARBY_CREEK";
805 static int board_type_to_info(u32 type,
806 struct opae_board_info *info)
810 info->nums_of_retimer = 2;
811 info->ports_per_retimer = 4;
812 info->nums_of_fvl = 2;
813 info->ports_per_fvl = 4;
816 info->nums_of_retimer = 1;
817 info->ports_per_retimer = 4;
818 info->nums_of_fvl = 2;
819 info->ports_per_fvl = 2;
822 info->nums_of_retimer = 2;
823 info->ports_per_retimer = 1;
824 info->nums_of_fvl = 1;
825 info->ports_per_fvl = 2;
828 info->nums_of_retimer = 2;
829 info->ports_per_retimer = 2;
830 info->nums_of_fvl = 2;
831 info->ports_per_fvl = 2;
840 static int fme_get_board_interface(struct ifpga_fme_hw *fme)
842 struct fme_bitstream_id id;
850 if (fme_hdr_get_bitstream_id(fme, &id.id))
853 fme->board_info.major = id.major;
854 fme->board_info.minor = id.minor;
855 fme->board_info.type = id.interface;
856 fme->board_info.fvl_bypass = id.fvl_bypass;
857 fme->board_info.mac_lightweight = id.mac_lightweight;
858 fme->board_info.lightweight = id.lightweiht;
859 fme->board_info.disaggregate = id.disagregate;
860 fme->board_info.seu = id.seu;
861 fme->board_info.ptp = id.ptp;
863 dev_info(fme, "found: PCI dev: %02x:%02x:%x board: %s type: %s\n",
866 hw->pci_data->function,
867 board_major_to_string(fme->board_info.major),
868 board_type_to_string(fme->board_info.type));
870 dev_info(fme, "support feature:\n"
872 "mac_lightweight:%s\n"
877 check_support(fme->board_info.fvl_bypass),
878 check_support(fme->board_info.mac_lightweight),
879 check_support(fme->board_info.lightweight),
880 check_support(fme->board_info.disaggregate),
881 check_support(fme->board_info.seu),
882 check_support(fme->board_info.ptp));
885 if (board_type_to_info(fme->board_info.type, &fme->board_info))
888 dev_info(fme, "get board info: nums_retimers %d ports_per_retimer %d nums_fvl %d ports_per_fvl %d\n",
889 fme->board_info.nums_of_retimer,
890 fme->board_info.ports_per_retimer,
891 fme->board_info.nums_of_fvl,
892 fme->board_info.ports_per_fvl);
894 if (max10_sys_read(fme->max10_dev, MAX10_BUILD_VER, &val))
896 fme->board_info.max10_version = val & 0xffffff;
898 if (max10_sys_read(fme->max10_dev, NIOS2_FW_VERSION, &val))
900 fme->board_info.nios_fw_version = val & 0xffffff;
902 dev_info(fme, "max10 version 0x%x, nios fw version 0x%x\n",
903 fme->board_info.max10_version,
904 fme->board_info.nios_fw_version);
909 static int spi_self_checking(struct intel_max10_device *dev)
914 ret = max10_sys_read(dev, MAX10_TEST_REG, &val);
918 dev_info(NULL, "Read MAX10 test register 0x%x\n", val);
923 static void init_spi_share_data(struct ifpga_fme_hw *fme,
924 struct altera_spi_device *spi)
926 struct ifpga_hw *hw = (struct ifpga_hw *)fme->parent;
927 opae_share_data *sd = NULL;
929 if (hw && hw->adapter && hw->adapter->shm.ptr) {
930 dev_info(NULL, "transfer share data to spi\n");
931 sd = (opae_share_data *)hw->adapter->shm.ptr;
932 spi->mutex = &sd->spi_mutex;
933 spi->dtb_sz_ptr = &sd->dtb_size;
937 spi->dtb_sz_ptr = NULL;
942 static int fme_spi_init(struct ifpga_feature *feature)
944 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
945 struct altera_spi_device *spi_master;
946 struct intel_max10_device *max10;
949 dev_info(fme, "FME SPI Master (Max10) Init.\n");
950 dev_debug(fme, "FME SPI base addr %p.\n",
952 dev_debug(fme, "spi param=0x%llx\n",
953 (unsigned long long)opae_readq(feature->addr + 0x8));
955 spi_master = altera_spi_alloc(feature->addr, TYPE_SPI);
958 init_spi_share_data(fme, spi_master);
960 altera_spi_init(spi_master);
962 max10 = intel_max10_device_probe(spi_master, 0);
965 dev_err(fme, "max10 init fail\n");
969 fme->max10_dev = max10;
972 if (spi_self_checking(max10)) {
980 intel_max10_device_remove(fme->max10_dev);
982 altera_spi_release(spi_master);
986 static void fme_spi_uinit(struct ifpga_feature *feature)
988 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
991 intel_max10_device_remove(fme->max10_dev);
994 struct ifpga_feature_ops fme_spi_master_ops = {
995 .init = fme_spi_init,
996 .uinit = fme_spi_uinit,
999 static int nios_spi_wait_init_done(struct altera_spi_device *dev)
1002 unsigned long timeout = rte_get_timer_cycles() +
1003 msecs_to_timer_cycles(10000);
1004 unsigned long ticks;
1006 int fecmode = FEC_MODE_NO;
1008 if (spi_reg_read(dev, NIOS_VERSION, &val))
1012 (val & NIOS_VERSION_MAJOR) >> NIOS_VERSION_MAJOR_SHIFT;
1013 dev_info(dev, "A10 NIOS FW version %d\n", major_version);
1015 if (major_version >= 3) {
1016 /* read NIOS_INIT to check if PKVL INIT done or not */
1017 if (spi_reg_read(dev, NIOS_INIT, &val))
1020 dev_debug(dev, "read NIOS_INIT: 0x%x\n", val);
1022 /* check if PKVLs are initialized already */
1023 if (val & NIOS_INIT_DONE || val & NIOS_INIT_START)
1024 goto nios_init_done;
1026 /* start to config the default FEC mode */
1027 val = fecmode | NIOS_INIT_START;
1029 if (spi_reg_write(dev, NIOS_INIT, val))
1035 if (spi_reg_read(dev, NIOS_INIT, &val))
1037 if (val & NIOS_INIT_DONE)
1040 ticks = rte_get_timer_cycles();
1041 if (time_after(ticks, timeout))
1046 /* get the fecmode */
1047 if (spi_reg_read(dev, NIOS_INIT, &val))
1049 dev_debug(dev, "read NIOS_INIT: 0x%x\n", val);
1050 fecmode = (val & REQ_FEC_MODE) >> REQ_FEC_MODE_SHIFT;
1051 dev_info(dev, "fecmode: 0x%x, %s\n", fecmode,
1052 (fecmode == FEC_MODE_KR) ? "kr" :
1053 ((fecmode == FEC_MODE_RS) ? "rs" : "no"));
1058 static int nios_spi_check_error(struct altera_spi_device *dev)
1062 if (spi_reg_read(dev, PKVL_A_MODE_STS, &value))
1065 dev_debug(dev, "PKVL A Mode Status 0x%x\n", value);
1070 if (spi_reg_read(dev, PKVL_B_MODE_STS, &value))
1073 dev_debug(dev, "PKVL B Mode Status 0x%x\n", value);
1081 static int fme_nios_spi_init(struct ifpga_feature *feature)
1083 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1084 struct altera_spi_device *spi_master;
1085 struct intel_max10_device *max10;
1086 struct ifpga_hw *hw;
1087 struct opae_manager *mgr;
1094 mgr = hw->adapter->mgr;
1098 dev_info(fme, "FME SPI Master (NIOS) Init.\n");
1099 dev_debug(fme, "FME SPI base addr %p.\n",
1101 dev_debug(fme, "spi param=0x%llx\n",
1102 (unsigned long long)opae_readq(feature->addr + 0x8));
1104 spi_master = altera_spi_alloc(feature->addr, TYPE_NIOS_SPI);
1107 init_spi_share_data(fme, spi_master);
1110 * 1. wait A10 NIOS initial finished and
1111 * release the SPI master to Host
1113 if (spi_master->mutex)
1114 pthread_mutex_lock(spi_master->mutex);
1116 ret = nios_spi_wait_init_done(spi_master);
1118 dev_err(fme, "FME NIOS_SPI init fail\n");
1119 if (spi_master->mutex)
1120 pthread_mutex_unlock(spi_master->mutex);
1124 dev_info(fme, "FME NIOS_SPI initial done\n");
1126 /* 2. check if error occur? */
1127 if (nios_spi_check_error(spi_master))
1128 dev_info(fme, "NIOS_SPI INIT done, but found some error\n");
1130 if (spi_master->mutex)
1131 pthread_mutex_unlock(spi_master->mutex);
1133 /* 3. init the spi master*/
1134 altera_spi_init(spi_master);
1136 /* init the max10 device */
1137 max10 = intel_max10_device_probe(spi_master, 0);
1140 dev_err(fme, "max10 init fail\n");
1144 fme->max10_dev = max10;
1146 max10->bus = hw->pci_data->bus;
1148 fme_get_board_interface(fme);
1150 mgr->sensor_list = &max10->opae_sensor_list;
1153 if (spi_self_checking(max10))
1156 ret = init_sec_mgr(fme);
1158 dev_err(fme, "security manager init fail\n");
1165 intel_max10_device_remove(fme->max10_dev);
1167 altera_spi_release(spi_master);
1171 static void fme_nios_spi_uinit(struct ifpga_feature *feature)
1173 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1175 release_sec_mgr(fme);
1177 intel_max10_device_remove(fme->max10_dev);
1180 struct ifpga_feature_ops fme_nios_spi_master_ops = {
1181 .init = fme_nios_spi_init,
1182 .uinit = fme_nios_spi_uinit,
1185 static int i2c_mac_rom_test(struct altera_i2c_dev *dev)
1189 char read_buf[20] = {0,};
1190 const char *string = "1a2b3c4d5e";
1192 opae_memcpy(buf, string, strlen(string));
1194 ret = at24_eeprom_write(dev, AT24512_SLAVE_ADDR, 0,
1195 (u8 *)buf, strlen(string));
1197 dev_err(NULL, "write i2c error:%d\n", ret);
1201 ret = at24_eeprom_read(dev, AT24512_SLAVE_ADDR, 0,
1202 (u8 *)read_buf, strlen(string));
1204 dev_err(NULL, "read i2c error:%d\n", ret);
1208 if (memcmp(buf, read_buf, strlen(string))) {
1209 dev_err(NULL, "%s test fail!\n", __func__);
1213 dev_info(NULL, "%s test successful\n", __func__);
1218 static void init_i2c_mutex(struct ifpga_fme_hw *fme)
1220 struct ifpga_hw *hw = (struct ifpga_hw *)fme->parent;
1221 struct altera_i2c_dev *i2c_dev;
1222 opae_share_data *sd = NULL;
1224 if (fme->i2c_master) {
1225 i2c_dev = (struct altera_i2c_dev *)fme->i2c_master;
1226 if (hw && hw->adapter && hw->adapter->shm.ptr) {
1227 dev_info(NULL, "use multi-process mutex in i2c\n");
1228 sd = (opae_share_data *)hw->adapter->shm.ptr;
1229 i2c_dev->mutex = &sd->i2c_mutex;
1231 dev_info(NULL, "use multi-thread mutex in i2c\n");
1232 i2c_dev->mutex = &i2c_dev->lock;
1237 static int fme_i2c_init(struct ifpga_feature *feature)
1239 struct feature_fme_i2c *i2c;
1240 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1242 i2c = (struct feature_fme_i2c *)feature->addr;
1244 dev_info(NULL, "FME I2C Master Init.\n");
1246 fme->i2c_master = altera_i2c_probe(i2c);
1247 if (!fme->i2c_master)
1250 init_i2c_mutex(fme);
1252 /* MAC ROM self test */
1253 i2c_mac_rom_test(fme->i2c_master);
1258 static void fme_i2c_uninit(struct ifpga_feature *feature)
1260 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1262 altera_i2c_remove(fme->i2c_master);
1265 struct ifpga_feature_ops fme_i2c_master_ops = {
1266 .init = fme_i2c_init,
1267 .uinit = fme_i2c_uninit,
1270 static int fme_eth_group_init(struct ifpga_feature *feature)
1272 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1273 struct eth_group_device *dev;
1275 dev = (struct eth_group_device *)eth_group_probe(feature->addr);
1279 fme->eth_dev[dev->group_id] = dev;
1281 fme->eth_group_region[dev->group_id].addr =
1283 fme->eth_group_region[dev->group_id].phys_addr =
1285 fme->eth_group_region[dev->group_id].len =
1288 fme->nums_eth_dev++;
1290 dev_info(NULL, "FME PHY Group %d Init.\n", dev->group_id);
1291 dev_info(NULL, "found %d eth group, addr %p phys_addr 0x%llx len %u\n",
1292 dev->group_id, feature->addr,
1293 (unsigned long long)feature->phys_addr,
1299 static void fme_eth_group_uinit(struct ifpga_feature *feature)
1304 struct ifpga_feature_ops fme_eth_group_ops = {
1305 .init = fme_eth_group_init,
1306 .uinit = fme_eth_group_uinit,
1309 int fme_mgr_read_mac_rom(struct ifpga_fme_hw *fme, int offset,
1310 void *buf, int size)
1312 struct altera_i2c_dev *dev;
1314 dev = fme->i2c_master;
1318 return at24_eeprom_read(dev, AT24512_SLAVE_ADDR, offset, buf, size);
1321 int fme_mgr_write_mac_rom(struct ifpga_fme_hw *fme, int offset,
1322 void *buf, int size)
1324 struct altera_i2c_dev *dev;
1326 dev = fme->i2c_master;
1330 return at24_eeprom_write(dev, AT24512_SLAVE_ADDR, offset, buf, size);
1333 static struct eth_group_device *get_eth_group_dev(struct ifpga_fme_hw *fme,
1336 struct eth_group_device *dev;
1338 if (group_id > (MAX_ETH_GROUP_DEVICES - 1))
1341 dev = (struct eth_group_device *)fme->eth_dev[group_id];
1345 if (dev->status != ETH_GROUP_DEV_ATTACHED)
1351 int fme_mgr_get_eth_group_nums(struct ifpga_fme_hw *fme)
1353 return fme->nums_eth_dev;
1356 int fme_mgr_get_eth_group_info(struct ifpga_fme_hw *fme,
1357 u8 group_id, struct opae_eth_group_info *info)
1359 struct eth_group_device *dev;
1361 dev = get_eth_group_dev(fme, group_id);
1365 info->group_id = group_id;
1366 info->speed = dev->speed;
1367 info->nums_of_mac = dev->mac_num;
1368 info->nums_of_phy = dev->phy_num;
1373 int fme_mgr_eth_group_read_reg(struct ifpga_fme_hw *fme, u8 group_id,
1374 u8 type, u8 index, u16 addr, u32 *data)
1376 struct eth_group_device *dev;
1378 dev = get_eth_group_dev(fme, group_id);
1382 return eth_group_read_reg(dev, type, index, addr, data);
1385 int fme_mgr_eth_group_write_reg(struct ifpga_fme_hw *fme, u8 group_id,
1386 u8 type, u8 index, u16 addr, u32 data)
1388 struct eth_group_device *dev;
1390 dev = get_eth_group_dev(fme, group_id);
1394 return eth_group_write_reg(dev, type, index, addr, data);
1397 static int fme_get_eth_group_speed(struct ifpga_fme_hw *fme,
1400 struct eth_group_device *dev;
1402 dev = get_eth_group_dev(fme, group_id);
1409 int fme_mgr_get_retimer_info(struct ifpga_fme_hw *fme,
1410 struct opae_retimer_info *info)
1412 struct intel_max10_device *dev;
1414 dev = (struct intel_max10_device *)fme->max10_dev;
1418 info->nums_retimer = fme->board_info.nums_of_retimer;
1419 info->ports_per_retimer = fme->board_info.ports_per_retimer;
1420 info->nums_fvl = fme->board_info.nums_of_fvl;
1421 info->ports_per_fvl = fme->board_info.ports_per_fvl;
1423 /* The speed of PKVL is identical the eth group's speed */
1424 info->support_speed = fme_get_eth_group_speed(fme,
1425 LINE_SIDE_GROUP_ID);
1430 int fme_mgr_get_retimer_status(struct ifpga_fme_hw *fme,
1431 struct opae_retimer_status *status)
1433 struct intel_max10_device *dev;
1436 dev = (struct intel_max10_device *)fme->max10_dev;
1440 if (max10_sys_read(dev, PKVL_LINK_STATUS, &val)) {
1441 dev_err(dev, "%s: read pkvl status fail\n", __func__);
1445 /* The speed of PKVL is identical the eth group's speed */
1446 status->speed = fme_get_eth_group_speed(fme,
1447 LINE_SIDE_GROUP_ID);
1449 status->line_link_bitmap = val;
1451 dev_debug(dev, "get retimer status: speed:%d. line_link_bitmap:0x%x\n",
1453 status->line_link_bitmap);
1458 int fme_mgr_get_sensor_value(struct ifpga_fme_hw *fme,
1459 struct opae_sensor_info *sensor,
1460 unsigned int *value)
1462 struct intel_max10_device *dev;
1464 dev = (struct intel_max10_device *)fme->max10_dev;
1468 if (max10_sys_read(dev, sensor->value_reg, value)) {
1469 dev_err(dev, "%s: read sensor value register 0x%x fail\n",
1470 __func__, sensor->value_reg);
1474 *value *= sensor->multiplier;