1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
8 #include "ifpga_feature_dev.h"
11 * Enable Port by clearing the port soft reset bit, which is set by default.
12 * The AFU is unable to respond to any MMIO access while in reset.
13 * __fpga_port_enable function should only be used after __fpga_port_disable
16 void __fpga_port_enable(struct ifpga_port_hw *port)
18 struct feature_port_header *port_hdr;
19 struct feature_port_control control;
/* Must be paired with a prior __fpga_port_disable() call. */
21 WARN_ON(!port->disable_count);
/* Nested disable/enable pairs: only the outermost enable de-asserts reset. */
23 if (--port->disable_count != 0)
26 port_hdr = get_port_feature_ioaddr_by_index(port,
27 PORT_FEATURE_ID_HEADER);
/* Read-modify-write the port control CSR: clear only the soft-reset bit. */
30 control.csr = readq(&port_hdr->control);
31 control.port_sftrst = 0x0;
32 writeq(control.csr, &port_hdr->control);
/*
 * Halt the Port by asserting its soft reset, then wait for the hardware to
 * acknowledge the reset. Reference-counted: only the first call actually
 * asserts reset; nested calls just bump disable_count.
 */
35 int __fpga_port_disable(struct ifpga_port_hw *port)
37 struct feature_port_header *port_hdr;
38 struct feature_port_control control;
/* Already in reset from an earlier call — just count the nesting. */
40 if (port->disable_count++ != 0)
43 port_hdr = get_port_feature_ioaddr_by_index(port,
44 PORT_FEATURE_ID_HEADER);
47 /* Set port soft reset */
48 control.csr = readq(&port_hdr->control);
49 control.port_sftrst = 0x1;
50 writeq(control.csr, &port_hdr->control);
53 * HW sets ack bit to 1 when all outstanding requests have been drained
54 * on this port and minimum soft reset pulse width has elapsed.
55 * Driver polls port_soft_reset_ack to determine if reset done by HW.
/* Expected field value for the poll below: ack == 1. */
57 control.port_sftrst_ack = 1;
/* Poll the control CSR until ack is seen or RST_POLL_TIMEOUT expires. */
59 if (fpga_wait_register_field(port_sftrst_ack, control,
60 &port_hdr->control, RST_POLL_TIMEOUT,
62 dev_err(port, "timeout, fail to reset device\n");
/*
 * Copy the 128-bit AFU GUID from the user-AFU region header into @uuid,
 * low 64 bits first.
 * NOTE(review): the guidl/guidh declarations are not visible in this view;
 * presumably both are u64 locals — confirm against the full file.
 */
69 int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid)
71 struct feature_port_header *port_hdr;
77 port_hdr = get_port_feature_ioaddr_by_index(port, PORT_FEATURE_ID_UAFU);
/* Hold the port lock so both 64-bit halves are read as a consistent pair. */
79 spinlock_lock(&port->lock);
80 guidl = readq(&port_hdr->afu_header.guid.b[0]);
81 guidh = readq(&port_hdr->afu_header.guid.b[8]);
82 spinlock_unlock(&port->lock);
84 opae_memcpy(uuid->b, &guidl, sizeof(u64));
85 opae_memcpy(uuid->b + 8, &guidh, sizeof(u64));
/*
 * Copy the 128-bit PR interface ID from the FME PR-management feature into
 * @uuid, low 64 bits first.
 * NOTE(review): the guidl/guidh declarations are not visible in this view;
 * presumably both are u64 locals — confirm against the full file.
 */
90 int fpga_get_pr_uuid(struct ifpga_fme_hw *fme, struct uuid *uuid)
92 struct feature_fme_pr *fme_pr;
98 fme_pr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_PR_MGMT);
/* Hold the FME lock so both 64-bit halves are read as a consistent pair. */
100 spinlock_lock(&fme->lock);
101 guidl = readq(&fme_pr->fme_pr_intfc_id_l);
102 guidh = readq(&fme_pr->fme_pr_intfc_id_h);
103 spinlock_unlock(&fme->lock);
105 opae_memcpy(uuid->b, &guidl, sizeof(u64));
106 opae_memcpy(uuid->b + 8, &guidh, sizeof(u64));
111 /* Mask / Unmask Port Errors by the Error Mask register. */
112 void port_err_mask(struct ifpga_port_hw *port, bool mask)
114 struct feature_port_error *port_err;
115 struct feature_port_err_key err_mask;
117 port_err = get_port_feature_ioaddr_by_index(port,
118 PORT_FEATURE_ID_ERROR);
/*
 * mask==true: write PORT_ERR_MASK (all mask bits set) to suppress errors.
 * NOTE(review): the mask==false branch (presumably err_mask.csr = 0) is not
 * visible in this view — confirm against the full file.
 */
121 err_mask.csr = PORT_ERR_MASK;
125 writeq(err_mask.csr, &port_err->error_mask);
128 /* Clear All Port Errors. */
129 int port_err_clear(struct ifpga_port_hw *port, u64 err)
131 struct feature_port_header *port_hdr;
132 struct feature_port_error *port_err;
133 struct feature_port_err_key mask;
134 struct feature_port_first_err_key first;
135 struct feature_port_status status;
138 port_err = get_port_feature_ioaddr_by_index(port,
139 PORT_FEATURE_ID_ERROR);
140 port_hdr = get_port_feature_ioaddr_by_index(port,
141 PORT_FEATURE_ID_HEADER);
144 * Clear All Port Errors
146 * - Check for AP6 State
147 * - Halt Port by keeping Port in reset
148 * - Set PORT Error mask to all 1 to mask errors
150 * - Set Port mask to all 0 to enable errors
151 * - All errors start capturing new errors
152 * - Enable Port by pulling the port out of reset
155 /* If device is still in AP6 state, can not clear any error.*/
156 status.csr = readq(&port_hdr->status);
157 if (status.power_state == PORT_POWER_STATE_AP6) {
/* NOTE(review): 'dev' is not declared in the visible scope — confirm this
 * should not be 'port' like the other dev_err() calls in this file.
 */
158 dev_err(dev, "Could not clear errors, device in AP6 state.\n");
162 /* Halt Port by keeping Port in reset */
163 ret = __fpga_port_disable(port);
167 /* Mask all errors */
168 port_err_mask(port, true);
170 /* Clear errors if err input matches with current port errors.*/
171 mask.csr = readq(&port_err->port_error);
/* Only clear when the caller's snapshot matches the current error state,
 * so errors raised in between are not silently discarded.
 */
173 if (mask.csr == err) {
/* Write the read-back value to clear — presumably write-1-to-clear
 * semantics; confirm against the port error register spec.
 */
174 writeq(mask.csr, &port_err->port_error);
176 first.csr = readq(&port_err->port_first_error);
177 writeq(first.csr, &port_err->port_first_error);
/* Re-enable error capture, then release the port from reset. */
183 port_err_mask(port, false);
185 /* Enable the Port by clear the reset */
186 __fpga_port_enable(port);
/*
 * Read the port's current error state and attempt to clear exactly those
 * errors via port_err_clear().
 */
191 int port_clear_error(struct ifpga_port_hw *port)
193 struct feature_port_error *port_err;
194 struct feature_port_err_key error;
196 port_err = get_port_feature_ioaddr_by_index(port,
197 PORT_FEATURE_ID_ERROR);
/* Snapshot current errors; port_err_clear() re-checks this value matches. */
198 error.csr = readq(&port_err->port_error);
200 dev_info(port, "read port error: 0x%lx\n", (unsigned long)error.csr);
202 return port_err_clear(port, error.csr);
/* Driver table mapping each FME sub-feature ID/name to its ops; the
 * all-NULL entry terminates the table for iteration.
 */
205 static struct feature_driver fme_feature_drvs[] = {
206 {FEATURE_DRV(FME_FEATURE_ID_HEADER, FME_FEATURE_HEADER,
208 {FEATURE_DRV(FME_FEATURE_ID_THERMAL_MGMT, FME_FEATURE_THERMAL_MGMT,
209 &fme_thermal_mgmt_ops),},
210 {FEATURE_DRV(FME_FEATURE_ID_POWER_MGMT, FME_FEATURE_POWER_MGMT,
211 &fme_power_mgmt_ops),},
212 {FEATURE_DRV(FME_FEATURE_ID_GLOBAL_ERR, FME_FEATURE_GLOBAL_ERR,
213 &fme_global_err_ops),},
214 {FEATURE_DRV(FME_FEATURE_ID_PR_MGMT, FME_FEATURE_PR_MGMT,
216 {FEATURE_DRV(FME_FEATURE_ID_GLOBAL_DPERF, FME_FEATURE_GLOBAL_DPERF,
217 &fme_global_dperf_ops),},
218 {FEATURE_DRV(FME_FEATURE_ID_HSSI_ETH, FME_FEATURE_HSSI_ETH,
219 &fme_hssi_eth_ops),},
220 {FEATURE_DRV(FME_FEATURE_ID_EMIF_MGMT, FME_FEATURE_EMIF_MGMT,
222 {FEATURE_DRV(FME_FEATURE_ID_MAX10_SPI, FME_FEATURE_MAX10_SPI,
223 &fme_spi_master_ops),},
224 {FEATURE_DRV(FME_FEATURE_ID_NIOS_SPI, FME_FEATURE_NIOS_SPI,
225 &fme_nios_spi_master_ops),},
226 {FEATURE_DRV(FME_FEATURE_ID_I2C_MASTER, FME_FEATURE_I2C_MASTER,
227 &fme_i2c_master_ops),},
228 {FEATURE_DRV(FME_FEATURE_ID_ETH_GROUP, FME_FEATURE_ETH_GROUP,
229 &fme_eth_group_ops),},
230 {0, NULL, NULL}, /* end of array */
/* Driver table mapping each Port sub-feature ID/name to its ops; the
 * all-NULL entry terminates the table for iteration.
 */
233 static struct feature_driver port_feature_drvs[] = {
234 {FEATURE_DRV(PORT_FEATURE_ID_HEADER, PORT_FEATURE_HEADER,
235 &ifpga_rawdev_port_hdr_ops)},
236 {FEATURE_DRV(PORT_FEATURE_ID_ERROR, PORT_FEATURE_ERR,
237 &ifpga_rawdev_port_error_ops)},
238 {FEATURE_DRV(PORT_FEATURE_ID_UINT, PORT_FEATURE_UINT,
239 &ifpga_rawdev_port_uint_ops)},
240 {FEATURE_DRV(PORT_FEATURE_ID_STP, PORT_FEATURE_STP,
241 &ifpga_rawdev_port_stp_ops)},
242 {FEATURE_DRV(PORT_FEATURE_ID_UAFU, PORT_FEATURE_UAFU,
243 &ifpga_rawdev_port_afu_ops)},
244 {0, NULL, NULL}, /* end of array */
/* Look up the human-readable name for an FME feature @id in
 * fme_feature_drvs. NOTE(review): the lookup loop and return are not
 * visible in this view — presumably it walks the table to a matching id.
 */
247 const char *get_fme_feature_name(unsigned int id)
249 struct feature_driver *drv = fme_feature_drvs;
/* Look up the human-readable name for a Port feature @id in
 * port_feature_drvs. NOTE(review): the lookup loop and return are not
 * visible in this view — presumably it walks the table to a matching id.
 */
261 const char *get_port_feature_name(unsigned int id)
263 struct feature_driver *drv = port_feature_drvs;
/* Invoke the uinit hook on every attached feature in @list that has one. */
275 static void feature_uinit(struct ifpga_feature_list *list)
277 struct ifpga_feature *feature;
279 TAILQ_FOREACH(feature, list, next) {
/* Skip features that were never attached (no ops bound). */
280 if (feature->state != IFPGA_FEATURE_ATTACHED)
282 if (feature->ops && feature->ops->uinit)
283 feature->ops->uinit(feature);
/*
 * Bind driver entries from the @drv table to matching attached features in
 * @list (by feature id) and run each bound driver's init hook.
 * NOTE(review): the outer iteration over the driver table and the error
 * handling for a failed init are not visible in this view.
 */
287 static int feature_init(struct feature_driver *drv,
288 struct ifpga_feature_list *list)
290 struct ifpga_feature *feature;
294 TAILQ_FOREACH(feature, list, next) {
295 if (feature->state != IFPGA_FEATURE_ATTACHED)
297 if (feature->id == drv->id) {
/* Bind the driver's ops and name to the discovered feature. */
298 feature->ops = drv->ops;
299 feature->name = drv->name;
300 if (feature->ops->init) {
301 ret = feature->ops->init(feature);
/* Initialize all FME features; only valid once the FME is implemented. */
316 int fme_hw_init(struct ifpga_fme_hw *fme)
320 if (fme->state != IFPGA_FME_IMPLEMENTED)
323 ret = feature_init(fme_feature_drvs, &fme->feature_list);
/* Tear down all FME features (reverse of fme_hw_init). */
330 void fme_hw_uinit(struct ifpga_fme_hw *fme)
332 feature_uinit(&fme->feature_list);
/* Tear down all Port features (reverse of port_hw_init). */
335 void port_hw_uinit(struct ifpga_port_hw *port)
337 feature_uinit(&port->feature_list);
/* Initialize all Port features; a port marked unused is skipped. */
340 int port_hw_init(struct ifpga_port_hw *port)
344 if (port->state == IFPGA_PORT_UNUSED)
347 ret = feature_init(port_feature_drvs, &port->feature_list);
/* Upper bound on MSI-X vectors (eventfds) accepted in one VFIO irq-set. */
357 #define FPGA_MAX_MSIX_VEC_COUNT 128
358 /* irq set buffer length for interrupt */
359 #define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
360 sizeof(int) * FPGA_MAX_MSIX_VEC_COUNT)
362 /* only support msix*/
/*
 * Enable a contiguous block of MSI-X vectors [vec_start, vec_start+count)
 * on the VFIO device @vfio_dev_fd, wiring each vector to the corresponding
 * eventfd in @fds via VFIO_DEVICE_SET_IRQS.
 */
363 static int vfio_msix_enable_block(s32 vfio_dev_fd, unsigned int vec_start,
364 unsigned int count, s32 *fds)
366 char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
367 struct vfio_irq_set *irq_set;
371 len = sizeof(irq_set_buf);
373 irq_set = (struct vfio_irq_set *)irq_set_buf;
374 irq_set->argsz = len;
375 /* 0 < irq_set->count < FPGA_MAX_MSIX_VEC_COUNT */
/* Clamp count into [1, FPGA_MAX_MSIX_VEC_COUNT]. */
376 irq_set->count = count ?
377 (count > FPGA_MAX_MSIX_VEC_COUNT ?
378 FPGA_MAX_MSIX_VEC_COUNT : count) : 1;
379 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
380 VFIO_IRQ_SET_ACTION_TRIGGER;
381 irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
382 irq_set->start = vec_start;
/* Eventfds follow the header in the same buffer (DATA_EVENTFD layout). */
384 fd_ptr = (int *)&irq_set->data;
/* NOTE(review): this memcpy uses the raw, unclamped 'count' while
 * irq_set_buf is sized for FPGA_MAX_MSIX_VEC_COUNT fds — a caller passing
 * count > FPGA_MAX_MSIX_VEC_COUNT would overflow the stack buffer. Confirm
 * all callers bound 'count', or clamp before copying.
 */
385 opae_memcpy(fd_ptr, fds, sizeof(int) * count);
387 ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
389 printf("Error enabling MSI-X interrupts\n");
/*
 * Bind eventfds @fds to a feature's MSI-X vectors [start, start+count)
 * and record them in the feature's interrupt contexts.
 */
394 int fpga_msix_set_block(struct ifpga_feature *feature, unsigned int start,
395 unsigned int count, s32 *fds)
397 struct feature_irq_ctx *ctx = feature->ctx;
/* Reject ranges outside the feature's context table.
 * NOTE(review): start + count may wrap for huge unsigned counts — confirm
 * callers cannot pass attacker-controlled values here.
 */
401 if (start >= feature->ctx_num || start + count > feature->ctx_num)
404 /* assume that each feature has continuous vector space in msix*/
405 ret = vfio_msix_enable_block(feature->vfio_dev_fd,
406 ctx[start].idx, count, fds);
/* NOTE(review): records fds at ctx[i], not ctx[start + i], even though the
 * enable call above starts at ctx[start] — confirm this offset is intended.
 */
408 for (i = 0; i < count; i++)
409 ctx[i].eventfd = fds[i];