1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include "ifpga_feature_dev.h"
10 * Enable Port by clear the port soft reset bit, which is set by default.
11 * The AFU is unable to respond to any MMIO access while in reset.
12 * __fpga_port_enable function should only be used after __fpga_port_disable
15 void __fpga_port_enable(struct ifpga_port_hw *port)
17 struct feature_port_header *port_hdr;
18 struct feature_port_control control;
/*
 * Enable is reference counted against __fpga_port_disable: only the
 * call that brings disable_count back to zero clears the soft-reset
 * bit. Calling enable without a prior disable is a driver bug.
 * NOTE(review): chunk is elided — the early-return body of the
 * refcount check is not visible here.
 */
20 WARN_ON(!port->disable_count);
22 if (--port->disable_count != 0)
25 port_hdr = get_port_feature_ioaddr_by_index(port,
26 PORT_FEATURE_ID_HEADER);
/* Clear the port soft-reset bit so the AFU can respond to MMIO again. */
29 control.csr = readq(&port_hdr->control);
30 control.port_sftrst = 0x0;
31 writeq(control.csr, &port_hdr->control);
34 int __fpga_port_disable(struct ifpga_port_hw *port)
36 struct feature_port_header *port_hdr;
37 struct feature_port_control control;
/*
 * Reference counted: only the first disable actually asserts the
 * port soft reset; nested calls just bump the counter.
 * NOTE(review): chunk is elided — the early-return body and the
 * final return value are not visible here.
 */
39 if (port->disable_count++ != 0)
42 port_hdr = get_port_feature_ioaddr_by_index(port,
43 PORT_FEATURE_ID_HEADER);
46 /* Set port soft reset */
47 control.csr = readq(&port_hdr->control);
48 control.port_sftrst = 0x1;
49 writeq(control.csr, &port_hdr->control);
52 * HW sets ack bit to 1 when all outstanding requests have been drained
53 * on this port and minimum soft reset pulse width has elapsed.
54 * Driver polls port_soft_reset_ack to determine if reset done by HW.
56 control.port_sftrst_ack = 1;
/* Poll the control register until the ack bit reads 1 or we time out. */
58 if (fpga_wait_register_field(port_sftrst_ack, control,
59 &port_hdr->control, RST_POLL_TIMEOUT,
61 dev_err(port, "timeout, fail to reset device\n");
68 int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid)
70 struct feature_port_header *port_hdr;
76 port_hdr = get_port_feature_ioaddr_by_index(port, PORT_FEATURE_ID_UAFU);
/*
 * Read the 128-bit AFU GUID as two 64-bit halves under the port lock
 * so the two reads are not interleaved with another accessor.
 */
78 spinlock_lock(&port->lock);
79 guidl = readq(&port_hdr->afu_header.guid.b[0]);
80 guidh = readq(&port_hdr->afu_header.guid.b[8]);
81 spinlock_unlock(&port->lock);
/* Assemble the caller's uuid: low 8 bytes first, then the high 8. */
83 opae_memcpy(uuid->b, &guidl, sizeof(u64));
84 opae_memcpy(uuid->b + 8, &guidh, sizeof(u64));
89 /* Mask / Unmask Port Errors by the Error Mask register. */
90 void port_err_mask(struct ifpga_port_hw *port, bool mask)
92 struct feature_port_error *port_err;
93 struct feature_port_err_key err_mask;
95 port_err = get_port_feature_ioaddr_by_index(port,
96 PORT_FEATURE_ID_ERROR);
/*
 * mask == true writes all-ones (PORT_ERR_MASK) to suppress error
 * reporting; the unmask (all-zero) path is elided from this chunk.
 */
99 err_mask.csr = PORT_ERR_MASK;
103 writeq(err_mask.csr, &port_err->error_mask);
106 /* Clear All Port Errors. */
107 int port_err_clear(struct ifpga_port_hw *port, u64 err)
109 struct feature_port_header *port_hdr;
110 struct feature_port_error *port_err;
111 struct feature_port_err_key mask;
112 struct feature_port_first_err_key first;
113 struct feature_port_status status;
116 port_err = get_port_feature_ioaddr_by_index(port,
117 PORT_FEATURE_ID_ERROR);
118 port_hdr = get_port_feature_ioaddr_by_index(port,
119 PORT_FEATURE_ID_HEADER);
122 * Clear All Port Errors
124 * - Check for AP6 State
125 * - Halt Port by keeping Port in reset
126 * - Set PORT Error mask to all 1 to mask errors
128 * - Set Port mask to all 0 to enable errors
129 * - All errors start capturing new errors
130 * - Enable Port by pulling the port out of reset
133 /* If device is still in AP6 state, can not clear any error.*/
134 status.csr = readq(&port_hdr->status);
135 if (status.power_state == PORT_POWER_STATE_AP6) {
136 dev_err(dev, "Could not clear errors, device in AP6 state.\n");
140 /* Halt Port by keeping Port in reset */
141 ret = __fpga_port_disable(port);
145 /* Mask all errors */
146 port_err_mask(port, true);
148 /* Clear errors if err input matches with current port errors.*/
149 mask.csr = readq(&port_err->port_error);
/*
 * The error registers are write-1-to-clear: writing back the value
 * just read clears exactly those bits. Only proceed when the caller's
 * expected error value matches what the hardware currently reports,
 * so a new error arriving in between is not silently discarded.
 */
151 if (mask.csr == err) {
152 writeq(mask.csr, &port_err->port_error);
154 first.csr = readq(&port_err->port_first_error);
155 writeq(first.csr, &port_err->port_first_error);
/* Re-enable error capture, then pull the port out of reset. */
161 port_err_mask(port, false);
163 /* Enable the Port by clear the reset */
164 __fpga_port_enable(port);
169 int port_clear_error(struct ifpga_port_hw *port)
171 struct feature_port_error *port_err;
172 struct feature_port_err_key error;
174 port_err = get_port_feature_ioaddr_by_index(port,
175 PORT_FEATURE_ID_ERROR);
/* Snapshot the current error bits and ask port_err_clear() to clear
 * exactly that set.
 */
176 error.csr = readq(&port_err->port_error);
178 dev_info(port, "read port error: 0x%lx\n", (unsigned long)error.csr);
180 return port_err_clear(port, error.csr);
/*
 * Table mapping FME (FPGA Management Engine) feature IDs to their
 * driver ops; matched by feature_init() against the discovered
 * feature list. Terminated by an all-NULL sentinel entry.
 */
183 static struct feature_driver fme_feature_drvs[] = {
184 {FEATURE_DRV(FME_FEATURE_ID_HEADER, FME_FEATURE_HEADER,
186 {FEATURE_DRV(FME_FEATURE_ID_THERMAL_MGMT, FME_FEATURE_THERMAL_MGMT,
187 &fme_thermal_mgmt_ops),},
188 {FEATURE_DRV(FME_FEATURE_ID_POWER_MGMT, FME_FEATURE_POWER_MGMT,
189 &fme_power_mgmt_ops),},
190 {FEATURE_DRV(FME_FEATURE_ID_GLOBAL_ERR, FME_FEATURE_GLOBAL_ERR,
191 &fme_global_err_ops),},
192 {FEATURE_DRV(FME_FEATURE_ID_PR_MGMT, FME_FEATURE_PR_MGMT,
194 {FEATURE_DRV(FME_FEATURE_ID_GLOBAL_DPERF, FME_FEATURE_GLOBAL_DPERF,
195 &fme_global_dperf_ops),},
196 {FEATURE_DRV(FME_FEATURE_ID_HSSI_ETH, FME_FEATURE_HSSI_ETH,
197 &fme_hssi_eth_ops),},
198 {FEATURE_DRV(FME_FEATURE_ID_EMIF_MGMT, FME_FEATURE_EMIF_MGMT,
200 {FEATURE_DRV(FME_FEATURE_ID_MAX10_SPI, FME_FEATURE_MAX10_SPI,
201 &fme_spi_master_ops),},
202 {FEATURE_DRV(FME_FEATURE_ID_NIOS_SPI, FME_FEATURE_NIOS_SPI,
203 &fme_nios_spi_master_ops),},
204 {FEATURE_DRV(FME_FEATURE_ID_I2C_MASTER, FME_FEATURE_I2C_MASTER,
205 &fme_i2c_master_ops),},
206 {FEATURE_DRV(FME_FEATURE_ID_ETH_GROUP, FME_FEATURE_ETH_GROUP,
207 &fme_eth_group_ops),},
208 {0, NULL, NULL}, /* end of array */
/*
 * Table mapping port feature IDs to their driver ops; consumed by
 * feature_init() for each port. Terminated by an all-NULL sentinel.
 */
211 static struct feature_driver port_feature_drvs[] = {
212 {FEATURE_DRV(PORT_FEATURE_ID_HEADER, PORT_FEATURE_HEADER,
213 &ifpga_rawdev_port_hdr_ops)},
214 {FEATURE_DRV(PORT_FEATURE_ID_ERROR, PORT_FEATURE_ERR,
215 &ifpga_rawdev_port_error_ops)},
216 {FEATURE_DRV(PORT_FEATURE_ID_UINT, PORT_FEATURE_UINT,
217 &ifpga_rawdev_port_uint_ops)},
218 {FEATURE_DRV(PORT_FEATURE_ID_STP, PORT_FEATURE_STP,
219 &ifpga_rawdev_port_stp_ops)},
220 {FEATURE_DRV(PORT_FEATURE_ID_UAFU, PORT_FEATURE_UAFU,
221 &ifpga_rawdev_port_afu_ops)},
222 {0, NULL, NULL}, /* end of array */
/*
 * Look up the human-readable name for an FME feature id by scanning
 * fme_feature_drvs. NOTE(review): the lookup loop and return are
 * elided from this chunk — presumably returns NULL when the id is
 * not found; verify against the full source.
 */
225 const char *get_fme_feature_name(unsigned int id)
227 struct feature_driver *drv = fme_feature_drvs;
/*
 * Look up the human-readable name for a port feature id by scanning
 * port_feature_drvs. NOTE(review): the lookup loop and return are
 * elided from this chunk — presumably returns NULL when the id is
 * not found; verify against the full source.
 */
239 const char *get_port_feature_name(unsigned int id)
241 struct feature_driver *drv = port_feature_drvs;
/*
 * Tear down every attached feature on the list by invoking its
 * optional ops->uinit callback; features that never attached (or
 * have no uinit hook) are skipped.
 */
253 static void feature_uinit(struct ifpga_feature_list *list)
255 struct ifpga_feature *feature;
257 TAILQ_FOREACH(feature, list, next) {
258 if (feature->state != IFPGA_FEATURE_ATTACHED)
260 if (feature->ops && feature->ops->uinit)
261 feature->ops->uinit(feature);
/*
 * Bind driver ops to discovered features: for each attached feature
 * whose id matches an entry in the drv table, record the ops/name and
 * run the driver's optional init callback.
 * NOTE(review): the drv-table iteration, error unwinding and return
 * value are elided from this chunk.
 */
265 static int feature_init(struct feature_driver *drv,
266 struct ifpga_feature_list *list)
268 struct ifpga_feature *feature;
272 TAILQ_FOREACH(feature, list, next) {
273 if (feature->state != IFPGA_FEATURE_ATTACHED)
275 if (feature->id == drv->id) {
276 feature->ops = drv->ops;
277 feature->name = drv->name;
278 if (feature->ops->init) {
279 ret = feature->ops->init(feature);
/*
 * Initialize all FME features for an implemented FME by binding
 * fme_feature_drvs to its feature list. Bails out early when the FME
 * is not in the IMPLEMENTED state (elided return path in this chunk).
 */
294 int fme_hw_init(struct ifpga_fme_hw *fme)
298 if (fme->state != IFPGA_FME_IMPLEMENTED)
301 ret = feature_init(fme_feature_drvs, &fme->feature_list);
/* Tear down all attached FME features. */
308 void fme_hw_uinit(struct ifpga_fme_hw *fme)
310 feature_uinit(&fme->feature_list);
/* Tear down all attached port features. */
313 void port_hw_uinit(struct ifpga_port_hw *port)
315 feature_uinit(&port->feature_list);
/*
 * Initialize all port features by binding port_feature_drvs to the
 * port's feature list. Skips unused ports (elided early-return path
 * in this chunk).
 */
318 int port_hw_init(struct ifpga_port_hw *port)
322 if (port->state == IFPGA_PORT_UNUSED)
325 ret = feature_init(port_feature_drvs, &port->feature_list);