/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include "ifpga_feature_dev.h"
10 * Enable Port by clear the port soft reset bit, which is set by default.
11 * The AFU is unable to respond to any MMIO access while in reset.
12 * __fpga_port_enable function should only be used after __fpga_port_disable
15 void __fpga_port_enable(struct ifpga_port_hw *port)
17 struct feature_port_header *port_hdr;
18 struct feature_port_control control;
20 WARN_ON(!port->disable_count);
22 if (--port->disable_count != 0)
25 port_hdr = get_port_feature_ioaddr_by_index(port,
26 PORT_FEATURE_ID_HEADER);
29 control.csr = readq(&port_hdr->control);
30 control.port_sftrst = 0x0;
31 writeq(control.csr, &port_hdr->control);
34 int __fpga_port_disable(struct ifpga_port_hw *port)
36 struct feature_port_header *port_hdr;
37 struct feature_port_control control;
39 if (port->disable_count++ != 0)
42 port_hdr = get_port_feature_ioaddr_by_index(port,
43 PORT_FEATURE_ID_HEADER);
46 /* Set port soft reset */
47 control.csr = readq(&port_hdr->control);
48 control.port_sftrst = 0x1;
49 writeq(control.csr, &port_hdr->control);
52 * HW sets ack bit to 1 when all outstanding requests have been drained
53 * on this port and minimum soft reset pulse width has elapsed.
54 * Driver polls port_soft_reset_ack to determine if reset done by HW.
56 control.port_sftrst_ack = 1;
58 if (fpga_wait_register_field(port_sftrst_ack, control,
59 &port_hdr->control, RST_POLL_TIMEOUT,
61 dev_err(port, "timeout, fail to reset device\n");
68 int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid)
70 struct feature_port_header *port_hdr;
76 port_hdr = get_port_feature_ioaddr_by_index(port, PORT_FEATURE_ID_UAFU);
78 spinlock_lock(&port->lock);
79 guidl = readq(&port_hdr->afu_header.guid.b[0]);
80 guidh = readq(&port_hdr->afu_header.guid.b[8]);
81 spinlock_unlock(&port->lock);
83 opae_memcpy(uuid->b, &guidl, sizeof(u64));
84 opae_memcpy(uuid->b + 8, &guidh, sizeof(u64));
89 /* Mask / Unmask Port Errors by the Error Mask register. */
90 void port_err_mask(struct ifpga_port_hw *port, bool mask)
92 struct feature_port_error *port_err;
93 struct feature_port_err_key err_mask;
95 port_err = get_port_feature_ioaddr_by_index(port,
96 PORT_FEATURE_ID_ERROR);
99 err_mask.csr = PORT_ERR_MASK;
103 writeq(err_mask.csr, &port_err->error_mask);
106 /* Clear All Port Errors. */
107 int port_err_clear(struct ifpga_port_hw *port, u64 err)
109 struct feature_port_header *port_hdr;
110 struct feature_port_error *port_err;
111 struct feature_port_err_key mask;
112 struct feature_port_first_err_key first;
113 struct feature_port_status status;
116 port_err = get_port_feature_ioaddr_by_index(port,
117 PORT_FEATURE_ID_ERROR);
118 port_hdr = get_port_feature_ioaddr_by_index(port,
119 PORT_FEATURE_ID_HEADER);
122 * Clear All Port Errors
124 * - Check for AP6 State
125 * - Halt Port by keeping Port in reset
126 * - Set PORT Error mask to all 1 to mask errors
128 * - Set Port mask to all 0 to enable errors
129 * - All errors start capturing new errors
130 * - Enable Port by pulling the port out of reset
133 /* If device is still in AP6 state, can not clear any error.*/
134 status.csr = readq(&port_hdr->status);
135 if (status.power_state == PORT_POWER_STATE_AP6) {
136 dev_err(dev, "Could not clear errors, device in AP6 state.\n");
140 /* Halt Port by keeping Port in reset */
141 ret = __fpga_port_disable(port);
145 /* Mask all errors */
146 port_err_mask(port, true);
148 /* Clear errors if err input matches with current port errors.*/
149 mask.csr = readq(&port_err->port_error);
151 if (mask.csr == err) {
152 writeq(mask.csr, &port_err->port_error);
154 first.csr = readq(&port_err->port_first_error);
155 writeq(first.csr, &port_err->port_first_error);
161 port_err_mask(port, false);
163 /* Enable the Port by clear the reset */
164 __fpga_port_enable(port);
169 int port_clear_error(struct ifpga_port_hw *port)
171 struct feature_port_error *port_err;
172 struct feature_port_err_key error;
174 port_err = get_port_feature_ioaddr_by_index(port,
175 PORT_FEATURE_ID_ERROR);
176 error.csr = readq(&port_err->port_error);
178 dev_info(port, "read port error: 0x%lx\n", (unsigned long)error.csr);
180 return port_err_clear(port, error.csr);
183 static struct feature_driver fme_feature_drvs[] = {
184 {FEATURE_DRV(FME_FEATURE_ID_HEADER, FME_FEATURE_HEADER,
186 {FEATURE_DRV(FME_FEATURE_ID_THERMAL_MGMT, FME_FEATURE_THERMAL_MGMT,
187 &fme_thermal_mgmt_ops),},
188 {FEATURE_DRV(FME_FEATURE_ID_POWER_MGMT, FME_FEATURE_POWER_MGMT,
189 &fme_power_mgmt_ops),},
190 {FEATURE_DRV(FME_FEATURE_ID_GLOBAL_ERR, FME_FEATURE_GLOBAL_ERR,
191 &fme_global_err_ops),},
192 {FEATURE_DRV(FME_FEATURE_ID_PR_MGMT, FME_FEATURE_PR_MGMT,
194 {FEATURE_DRV(FME_FEATURE_ID_GLOBAL_DPERF, FME_FEATURE_GLOBAL_DPERF,
195 &fme_global_dperf_ops),},
196 {FEATURE_DRV(FME_FEATURE_ID_HSSI_ETH, FME_FEATURE_HSSI_ETH,
197 &fme_hssi_eth_ops),},
198 {FEATURE_DRV(FME_FEATURE_ID_EMIF_MGMT, FME_FEATURE_EMIF_MGMT,
200 {FEATURE_DRV(FME_FEATURE_ID_MAX10_SPI, FME_FEATURE_MAX10_SPI,
201 &fme_spi_master_ops),},
202 {FEATURE_DRV(FME_FEATURE_ID_NIOS_SPI, FME_FEATURE_NIOS_SPI,
203 &fme_nios_spi_master_ops),},
204 {0, NULL, NULL}, /* end of arrary */
207 static struct feature_driver port_feature_drvs[] = {
208 {FEATURE_DRV(PORT_FEATURE_ID_HEADER, PORT_FEATURE_HEADER,
209 &ifpga_rawdev_port_hdr_ops)},
210 {FEATURE_DRV(PORT_FEATURE_ID_ERROR, PORT_FEATURE_ERR,
211 &ifpga_rawdev_port_error_ops)},
212 {FEATURE_DRV(PORT_FEATURE_ID_UINT, PORT_FEATURE_UINT,
213 &ifpga_rawdev_port_uint_ops)},
214 {FEATURE_DRV(PORT_FEATURE_ID_STP, PORT_FEATURE_STP,
215 &ifpga_rawdev_port_stp_ops)},
216 {FEATURE_DRV(PORT_FEATURE_ID_UAFU, PORT_FEATURE_UAFU,
217 &ifpga_rawdev_port_afu_ops)},
218 {0, NULL, NULL}, /* end of array */
221 const char *get_fme_feature_name(unsigned int id)
223 struct feature_driver *drv = fme_feature_drvs;
235 const char *get_port_feature_name(unsigned int id)
237 struct feature_driver *drv = port_feature_drvs;
249 static void feature_uinit(struct ifpga_feature_list *list)
251 struct feature *feature;
253 TAILQ_FOREACH(feature, list, next) {
254 if (feature->state != IFPGA_FEATURE_ATTACHED)
256 if (feature->ops && feature->ops->uinit)
257 feature->ops->uinit(feature);
261 static int feature_init(struct feature_driver *drv,
262 struct ifpga_feature_list *list)
264 struct feature *feature;
268 TAILQ_FOREACH(feature, list, next) {
269 if (feature->state != IFPGA_FEATURE_ATTACHED)
271 if (feature->id == drv->id) {
272 feature->ops = drv->ops;
273 feature->name = drv->name;
274 if (feature->ops->init) {
275 ret = feature->ops->init(feature);
290 int fme_hw_init(struct ifpga_fme_hw *fme)
294 if (fme->state != IFPGA_FME_IMPLEMENTED)
297 ret = feature_init(fme_feature_drvs, &fme->feature_list);
304 void fme_hw_uinit(struct ifpga_fme_hw *fme)
306 feature_uinit(&fme->feature_list);
309 void port_hw_uinit(struct ifpga_port_hw *port)
311 feature_uinit(&port->feature_list);
314 int port_hw_init(struct ifpga_port_hw *port)
318 if (port->state == IFPGA_PORT_UNUSED)
321 ret = feature_init(port_feature_drvs, &port->feature_list);