1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include "ifpga_feature_dev.h"
10 * Enable Port by clear the port soft reset bit, which is set by default.
11 * The AFU is unable to respond to any MMIO access while in reset.
12 * __fpga_port_enable function should only be used after __fpga_port_disable
 * is executed: the two calls form a reference-counted pair.
15 void __fpga_port_enable(struct ifpga_port_hw *port)
17 struct feature_port_header *port_hdr;
18 struct feature_port_control control;
 /* Enable without a matching prior disable is a driver bug. */
20 WARN_ON(!port->disable_count);
 /* Reference-counted: only the outermost enable (count dropping to 0)
  * actually touches the hardware; nested calls return early. */
22 if (--port->disable_count != 0)
25 port_hdr = get_port_feature_ioaddr_by_index(port,
26 PORT_FEATURE_ID_HEADER);
 /* Read-modify-write the port control CSR, clearing only the
  * soft-reset bit so the AFU can respond to MMIO again. */
29 control.csr = readq(&port_hdr->control);
30 control.port_sftrst = 0x0;
31 writeq(control.csr, &port_hdr->control);
/*
 * Put the port (and its AFU) into soft reset.
 * Reference-counted counterpart of __fpga_port_enable: only the first
 * disable asserts the reset; nested calls just bump disable_count.
 * Returns an int status — NOTE(review): the early-return, timeout-return
 * and final-return lines are not visible in this view; presumably 0 on
 * success and a negative errno on ack timeout — TODO confirm.
 */
34 int __fpga_port_disable(struct ifpga_port_hw *port)
36 struct feature_port_header *port_hdr;
37 struct feature_port_control control;
 /* Already disabled by an outer caller: just count the nesting. */
39 if (port->disable_count++ != 0)
42 port_hdr = get_port_feature_ioaddr_by_index(port,
43 PORT_FEATURE_ID_HEADER);
46 /* Set port soft reset */
47 control.csr = readq(&port_hdr->control);
48 control.port_sftrst = 0x1;
49 writeq(control.csr, &port_hdr->control);
52 * HW sets ack bit to 1 when all outstanding requests have been drained
53 * on this port and minimum soft reset pulse width has elapsed.
54 * Driver polls port_soft_reset_ack to determine if reset done by HW.
 /* Expected field value for the poll below: ack asserted. */
56 control.port_sftrst_ack = 1;
 /* Poll the control CSR until the ack bit matches, bounded by
  * RST_POLL_TIMEOUT; non-zero result means the poll timed out. */
58 if (fpga_wait_register_field(port_sftrst_ack, control,
59 &port_hdr->control, RST_POLL_TIMEOUT,
61 dev_err(port, "timeout, fail to reset device\n");
/*
 * Read the 128-bit AFU GUID from the user-AFU (UAFU) feature header
 * into @uuid. The GUID is fetched as two 64-bit MMIO reads performed
 * under the port lock so the halves are taken from a consistent state.
 * NOTE(review): the declarations of guidl/guidh and the return
 * statement are not visible in this view — presumably u64 locals and
 * a 0 return — TODO confirm.
 */
68 int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid)
70 struct feature_port_header *port_hdr;
73 port_hdr = get_port_feature_ioaddr_by_index(port, PORT_FEATURE_ID_UAFU);
 /* Serialize the two halves of the GUID read against other port users. */
75 spinlock_lock(&port->lock);
76 guidl = readq(&port_hdr->afu_header.guid.b[0]);
77 guidh = readq(&port_hdr->afu_header.guid.b[8]);
78 spinlock_unlock(&port->lock);
 /* Low 8 bytes first, then high 8 bytes, into the caller's buffer. */
80 memcpy(uuid->b, &guidl, sizeof(u64));
81 memcpy(uuid->b + 8, &guidh, sizeof(u64));
86 /* Mask / Unmask Port Errors by the Error Mask register.
 * mask == true writes the all-ones PORT_ERR_MASK (errors suppressed).
 * NOTE(review): the unmask branch (mask == false, presumably writing 0
 * to re-enable error capture) is not visible in this view — TODO confirm.
 */
87 void port_err_mask(struct ifpga_port_hw *port, bool mask)
89 struct feature_port_error *port_err;
90 struct feature_port_err_key err_mask;
92 port_err = get_port_feature_ioaddr_by_index(port,
93 PORT_FEATURE_ID_ERROR);
96 err_mask.csr = PORT_ERR_MASK;
 /* Commit the chosen mask value to the hardware error-mask register. */
100 writeq(err_mask.csr, &port_err->error_mask);
103 /* Clear All Port Errors.
 * @err is the caller's snapshot of the error CSR; errors are only
 * cleared if the hardware still reports exactly that value (so errors
 * raised since the snapshot are not silently discarded). */
104 int port_err_clear(struct ifpga_port_hw *port, u64 err)
106 struct feature_port_header *port_hdr;
107 struct feature_port_error *port_err;
108 struct feature_port_err_key mask;
109 struct feature_port_first_err_key first;
110 struct feature_port_status status;
113 port_err = get_port_feature_ioaddr_by_index(port,
114 PORT_FEATURE_ID_ERROR);
115 port_hdr = get_port_feature_ioaddr_by_index(port,
116 PORT_FEATURE_ID_HEADER);
119 * Clear All Port Errors
121 * - Check for AP6 State
122 * - Halt Port by keeping Port in reset
123 * - Set PORT Error mask to all 1 to mask errors
 * - Clear errors (only when the caller-supplied snapshot still matches)
125 * - Set Port mask to all 0 to enable errors
126 * - All errors start capturing new errors
127 * - Enable Port by pulling the port out of reset
130 /* If device is still in AP6 state, can not clear any error.*/
131 status.csr = readq(&port_hdr->status);
132 if (status.power_state == PORT_POWER_STATE_AP6) {
 /* NOTE(review): sibling calls pass 'port' as the first dev_err
  * argument and 'dev' is not declared in the visible scope — verify
  * (the dev_err macro may discard its first argument). */
133 dev_err(dev, "Could not clear errors, device in AP6 state.\n");
137 /* Halt Port by keeping Port in reset */
138 ret = __fpga_port_disable(port);
142 /* Mask all errors */
143 port_err_mask(port, true);
145 /* Clear errors if err input matches with current port errors.*/
146 mask.csr = readq(&port_err->port_error);
148 if (mask.csr == err) {
 /* Write-1-to-clear: writing the observed bits back clears them,
  * then the same is done for the first-error capture register. */
149 writeq(mask.csr, &port_err->port_error);
151 first.csr = readq(&port_err->port_first_error);
152 writeq(first.csr, &port_err->port_first_error);
 /* Re-enable error capture and release the port from reset. */
158 port_err_mask(port, false);
160 /* Enable the Port by clear the reset */
161 __fpga_port_enable(port);
/*
 * Snapshot the port's current error CSR, log it, and ask
 * port_err_clear() to clear exactly those bits. Returns
 * port_err_clear()'s status (clearing fails if new errors have
 * appeared since the snapshot, or the device is in AP6 state).
 */
166 int port_clear_error(struct ifpga_port_hw *port)
168 struct feature_port_error *port_err;
169 struct feature_port_err_key error;
171 port_err = get_port_feature_ioaddr_by_index(port,
172 PORT_FEATURE_ID_ERROR);
173 error.csr = readq(&port_err->port_error);
175 dev_info(port, "read port error: 0x%lx\n", (unsigned long)error.csr);
177 return port_err_clear(port, error.csr);
/*
 * Tear down all attached FME sub-features by invoking each feature's
 * uinit op. A no-op unless the FME is in the IMPLEMENTED state.
 */
180 void fme_hw_uinit(struct ifpga_fme_hw *fme)
182 struct feature *feature;
185 if (fme->state != IFPGA_FME_IMPLEMENTED)
188 for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
189 feature = &fme->sub_feature[i];
 /* Only attached features with a registered uinit hook are torn down. */
190 if (feature->state == IFPGA_FEATURE_ATTACHED &&
191 feature->ops && feature->ops->uinit)
192 feature->ops->uinit(feature);
/*
 * Initialize all attached FME sub-features by invoking each feature's
 * init op. Skipped unless the FME is in the IMPLEMENTED state.
 * NOTE(review): the handling of a non-zero init return (presumably an
 * early error return) is not visible in this view — TODO confirm.
 */
196 int fme_hw_init(struct ifpga_fme_hw *fme)
198 struct feature *feature;
201 if (fme->state != IFPGA_FME_IMPLEMENTED)
204 for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
205 feature = &fme->sub_feature[i];
 /* Only attached features with a registered init hook are initialized. */
206 if (feature->state == IFPGA_FEATURE_ATTACHED &&
207 feature->ops && feature->ops->init) {
208 ret = feature->ops->init(feature);
/*
 * Tear down all attached port sub-features by invoking each feature's
 * uinit op. Unlike fme_hw_uinit, no state gate is visible here — every
 * attached feature with a uinit hook is torn down.
 */
221 void port_hw_uinit(struct ifpga_port_hw *port)
223 struct feature *feature;
224 for (i = 0; i < PORT_FEATURE_ID_MAX; i++) {
225 feature = &port->sub_feature[i];
 /* Only attached features with a registered uinit hook are torn down. */
226 if (feature->state == IFPGA_FEATURE_ATTACHED &&
227 feature->ops && feature->ops->uinit)
228 feature->ops->uinit(feature);
232 int port_hw_init(struct ifpga_port_hw *port)
234 struct feature *feature;
237 if (port->state == IFPGA_PORT_UNUSED)
240 for (i = 0; i < PORT_FEATURE_ID_MAX; i++) {
241 feature = &port->sub_feature[i];
242 if (feature->ops && feature->ops->init) {
243 ret = feature->ops->init(feature);