1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
5 #include "ifpga_feature_dev.h"
/*
 * pr_err_handle - report and clear any latched PR (partial reconfiguration)
 * error bits in the FME PR feature registers.
 *
 * NOTE(review): the return type of this function and the declarations of
 * fme_pr_error and i are not visible in this view of the file.
 */
8 pr_err_handle(struct feature_fme_pr *fme_pr)
10 struct feature_fme_pr_status fme_pr_status;
11 unsigned long err_code;
/* Nothing to report when the status register shows no PR error. */
15 fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
16 if (!fme_pr_status.pr_status)
/* Snapshot the latched error bits for reporting and clearing below. */
19 err_code = readq(&fme_pr->ccip_fme_pr_err);
20 fme_pr_error = err_code;
/* Log a human-readable message for every error bit that is set. */
22 for (i = 0; i < PR_MAX_ERR_NUM; i++) {
23 if (err_code & (1 << i))
24 dev_info(NULL, "%s\n", pr_err_msg[i]);
/* Write the value back to clear the latched errors — presumably a
 * write-1-to-clear register; TODO confirm against the register spec. */
27 writeq(fme_pr_error, &fme_pr->ccip_fme_pr_err);
/*
 * fme_pr_write_init - prepare the FME PR engine for a partial
 * reconfiguration.
 *
 * Sequence: pulse the PR reset bit and wait for its acknowledge, wait for
 * the PR host status to go idle, then report any stale errors left over
 * from a previous PR operation via pr_err_handle().
 *
 * @fme_dev: FME hardware instance used to locate the PR feature registers.
 * @info:    PR request descriptor; only FPGA_MGR_PARTIAL_RECONFIG is
 *           accepted by this path.
 *
 * NOTE(review): the error-return statements inside the failure branches
 * are not visible in this view of the file.
 */
31 static int fme_pr_write_init(struct ifpga_fme_hw *fme_dev,
32 struct fpga_pr_info *info)
34 struct feature_fme_pr *fme_pr;
35 struct feature_fme_pr_ctl fme_pr_ctl;
36 struct feature_fme_pr_status fme_pr_status;
38 fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
39 FME_FEATURE_ID_PR_MGMT);
/* Only partial reconfiguration is supported; reject anything else. */
43 if (info->flags != FPGA_MGR_PARTIAL_RECONFIG)
46 dev_info(fme_dev, "resetting PR before initiated PR\n");
/* Assert the PR reset bit in the control register. */
48 fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
49 fme_pr_ctl.pr_reset = 1;
50 writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
/* Poll until hardware acknowledges the reset, up to PR_WAIT_TIMEOUT. */
52 fme_pr_ctl.pr_reset_ack = 1;
54 if (fpga_wait_register_field(pr_reset_ack, fme_pr_ctl,
55 &fme_pr->ccip_fme_pr_control,
56 PR_WAIT_TIMEOUT, 1)) {
57 dev_err(fme_dev, "maximum PR timeout\n");
/* De-assert the reset now that it has been acknowledged. */
61 fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
62 fme_pr_ctl.pr_reset = 0;
63 writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
65 dev_info(fme_dev, "waiting for PR resource in HW to be initialized and ready\n");
/* Wait for the PR host status to report idle before starting. */
67 fme_pr_status.pr_host_status = PR_HOST_STATUS_IDLE;
69 if (fpga_wait_register_field(pr_host_status, fme_pr_status,
70 &fme_pr->ccip_fme_pr_status,
71 PR_WAIT_TIMEOUT, 1)) {
72 dev_err(fme_dev, "maximum PR timeout\n");
/* Surface any error bits left over from an earlier PR attempt. */
76 dev_info(fme_dev, "check if have any previous PR error\n");
77 pr_err_handle(fme_pr);
/*
 * fme_pr_write - stream a partial-reconfiguration bitstream into the FPGA.
 *
 * Selects the target port/region, raises the PR start request, then pushes
 * the bitstream into the PR data register pr_bandwidth bytes at a time,
 * pacing each write on the hardware's credit counter.
 *
 * @fme_dev: FME hardware instance (supplies pr_bandwidth and registers).
 * @port_id: target port/region the bitstream reconfigures.
 * @buf:     bitstream payload; caller is expected to have padded it to a
 *           multiple of pr_bandwidth (done by fme_pr()).
 * @count:   payload length in bytes.
 * @info:    PR descriptor; pr_err is filled on credit-wait timeout.
 *
 * NOTE(review): the declarations of pr_credit and delay, the other switch
 * cases (the 64-byte path), and the loop-closing braces are not visible in
 * this view of the file.
 */
81 static int fme_pr_write(struct ifpga_fme_hw *fme_dev,
82 int port_id, const char *buf, size_t count,
83 struct fpga_pr_info *info)
85 struct feature_fme_pr *fme_pr;
86 struct feature_fme_pr_ctl fme_pr_ctl;
87 struct feature_fme_pr_status fme_pr_status;
88 struct feature_fme_pr_data fme_pr_data;
92 fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
93 FME_FEATURE_ID_PR_MGMT);
97 dev_info(fme_dev, "set PR port ID and start request\n");
/* Program the target region and kick off the PR start request. */
99 fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
100 fme_pr_ctl.pr_regionid = port_id;
101 fme_pr_ctl.pr_start_req = 1;
102 writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
104 dev_info(fme_dev, "pushing data from bitstream to HW\n");
/* Read the initial flow-control credit from the status register. */
106 fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
107 pr_credit = fme_pr_status.pr_credit;
/* Wait for the hardware to grant at least one credit; on timeout,
 * report the latched PR error (if any) and fail. */
111 while (pr_credit <= 1) {
112 if (delay++ > PR_WAIT_TIMEOUT) {
113 dev_err(fme_dev, "maximum try\n");
115 info->pr_err = pr_err_handle(fme_pr);
116 return info->pr_err ? -EIO : -ETIMEDOUT;
/* Re-poll the credit counter until the HW can accept data. */
120 fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
121 pr_credit = fme_pr_status.pr_credit;
/* Push one pr_bandwidth-sized chunk; this visible case handles the
 * 4-byte (32-bit) bandwidth via the raw-data field of the CSR. */
124 if (count >= fme_dev->pr_bandwidth) {
125 switch (fme_dev->pr_bandwidth) {
127 fme_pr_data.rsvd = 0;
128 fme_pr_data.pr_data_raw = *((const u32 *)buf);
129 writeq(fme_pr_data.csr,
130 &fme_pr->ccip_fme_pr_data);
/* Advance through the bitstream by one chunk. */
137 buf += fme_dev->pr_bandwidth;
138 count -= fme_dev->pr_bandwidth;
/*
 * fme_pr_write_complete - finish a PR push and check the final status.
 *
 * Signals push-complete to the hardware, waits for the PR engine to drop
 * the start request (releasing the PR resource), then reads back and
 * clears any latched PR errors into info->pr_err.
 *
 * NOTE(review): the error-return path after the timeout branch is not
 * visible in this view of the file. Also, the timeout message uses bare
 * printf() while every other failure here uses dev_err() — looks like an
 * inconsistency worth fixing when the full file is in view.
 */
151 static int fme_pr_write_complete(struct ifpga_fme_hw *fme_dev,
152 struct fpga_pr_info *info)
154 struct feature_fme_pr *fme_pr;
155 struct feature_fme_pr_ctl fme_pr_ctl;
157 fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
158 FME_FEATURE_ID_PR_MGMT);
/* Tell the hardware the bitstream push is finished. */
160 fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
161 fme_pr_ctl.pr_push_complete = 1;
162 writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
164 dev_info(fme_dev, "green bitstream push complete\n");
165 dev_info(fme_dev, "waiting for HW to release PR resource\n");
/* Wait for the start-request bit to clear, i.e. HW released the PR
 * resource, up to PR_WAIT_TIMEOUT. */
167 fme_pr_ctl.pr_start_req = 0;
169 if (fpga_wait_register_field(pr_start_req, fme_pr_ctl,
170 &fme_pr->ccip_fme_pr_control,
171 PR_WAIT_TIMEOUT, 1)) {
172 printf("maximum try.\n");
/* Collect (and clear) any PR error the operation produced. */
176 dev_info(fme_dev, "PR operation complete, checking status\n");
177 info->pr_err = pr_err_handle(fme_pr);
181 dev_info(fme_dev, "PR done successfully\n");
/*
 * fpga_pr_buf_load - drive the full PR state machine for one bitstream.
 *
 * Runs the three phases in order — write_init, write, write_complete —
 * updating info->state before each phase and setting the matching *_ERR
 * state when a phase fails, ending in FPGA_PR_STATE_DONE on success.
 *
 * NOTE(review): the declaration of ret, the `count` parameter line, the
 * if (ret) checks and the return statements are not visible in this view
 * of the file.
 */
185 static int fpga_pr_buf_load(struct ifpga_fme_hw *fme_dev,
186 struct fpga_pr_info *info, const char *buf,
/* Phase 1: reset the PR engine and wait until it is ready. */
191 info->state = FPGA_PR_STATE_WRITE_INIT;
192 ret = fme_pr_write_init(fme_dev, info);
194 dev_err(fme_dev, "Error preparing FPGA for writing\n");
195 info->state = FPGA_PR_STATE_WRITE_INIT_ERR;
200 * Write the FPGA image to the FPGA.
/* Phase 2: stream the bitstream into the PR data register. */
202 info->state = FPGA_PR_STATE_WRITE;
203 ret = fme_pr_write(fme_dev, info->port_id, buf, count, info);
205 dev_err(fme_dev, "Error while writing image data to FPGA\n");
206 info->state = FPGA_PR_STATE_WRITE_ERR;
211 * After all the FPGA image has been written, do the device specific
212 * steps to finish and set the FPGA into operating mode.
/* Phase 3: signal completion and verify final PR status. */
214 info->state = FPGA_PR_STATE_WRITE_COMPLETE;
215 ret = fme_pr_write_complete(fme_dev, info);
217 dev_err(fme_dev, "Error after writing image data to FPGA\n");
218 info->state = FPGA_PR_STATE_WRITE_COMPLETE_ERR;
221 info->state = FPGA_PR_STATE_DONE;
/*
 * fme_pr - perform a partial reconfiguration of one port.
 *
 * Validates the request (buffer present, FME implemented, port_id within
 * the port count advertised by the FME capability register), pads the
 * size to the PR bandwidth, then — under the FME lock and with the target
 * port disabled — loads the bitstream via fpga_pr_buf_load(). The PR
 * error word (if any) is reported back through *status.
 *
 * NOTE(review): the declaration of ret and the early error-return
 * statements for the validation failures are not visible in this view of
 * the file.
 */
226 static int fme_pr(struct ifpga_hw *hw, u32 port_id, const char *buffer,
227 u32 size, u64 *status)
229 struct feature_fme_header *fme_hdr;
230 struct feature_fme_capability fme_capability;
231 struct ifpga_fme_hw *fme = &hw->fme;
232 struct fpga_pr_info info;
233 struct ifpga_port_hw *port;
/* Reject empty requests and FMEs that are not fully implemented. */
236 if (!buffer || size == 0)
238 if (fme->state != IFPGA_FME_IMPLEMENTED)
242 * Padding extra zeros to align PR buffer with PR bandwidth, HW will
243 * ignore these zeros automatically.
245 size = IFPGA_ALIGN(size, fme->pr_bandwidth);
247 /* get fme header region */
248 fme_hdr = get_fme_feature_ioaddr_by_index(fme,
249 FME_FEATURE_ID_HEADER);
/* Bound-check the port against the HW-advertised port count. */
254 fme_capability.csr = readq(&fme_hdr->capability);
255 if (port_id >= fme_capability.num_ports) {
256 dev_err(fme, "port number more than maximum\n");
/* Build the PR request descriptor for this port. */
260 opae_memset(&info, 0, sizeof(struct fpga_pr_info));
261 info.flags = FPGA_MGR_PARTIAL_RECONFIG;
262 info.port_id = port_id;
/* Serialize PR operations on this FME. */
264 spinlock_lock(&fme->lock);
266 /* get port device by port_id */
267 port = &hw->port[port_id];
269 /* Disable Port before PR */
270 fpga_port_disable(port);
272 ret = fpga_pr_buf_load(fme, &info, buffer, size);
/* Propagate the detailed PR error word to the caller. */
274 *status = info.pr_err;
276 /* Re-enable Port after PR finished */
277 fpga_port_enable(port);
278 spinlock_unlock(&fme->lock);
/*
 * do_pr - public entry point for partial reconfiguration.
 *
 * Validates the caller's buffer, strips the bitstream header (bts_header
 * plus its variable-length metadata) when the header is valid, clears any
 * pending error on the target port, and hands the raw payload to fme_pr().
 *
 * NOTE(review): the declarations of buf, header_size and ret, and the
 * error-return statements in the failure branches, are not visible in
 * this view of the file. The "bitsteam" spelling in the log string is a
 * typo in runtime text — fix it in a code change, not here.
 */
283 int do_pr(struct ifpga_hw *hw, u32 port_id, const char *buffer,
284 u32 size, u64 *status)
286 const struct bts_header *bts_hdr;
288 struct ifpga_port_hw *port;
292 if (!buffer || size == 0) {
293 dev_err(hw, "invalid parameter\n");
/* Inspect the bitstream header; only valid headers are accepted. */
297 bts_hdr = (const struct bts_header *)buffer;
299 if (is_valid_bts(bts_hdr)) {
300 dev_info(hw, "this is a valid bitsteam..\n");
/* Skip past the fixed header and its trailing metadata, making
 * sure the buffer is at least that large. */
301 header_size = sizeof(struct bts_header) +
302 bts_hdr->metadata_len;
303 if (size < header_size)
306 buf = buffer + header_size;
308 dev_err(hw, "this is an invalid bitstream..\n");
312 /* clean port error before do PR */
313 port = &hw->port[port_id];
314 ret = port_clear_error(port);
316 dev_err(hw, "port cannot clear error\n");
/* Delegate the actual reconfiguration to the FME PR engine. */
320 return fme_pr(hw, port_id, buf, size, status);
/*
 * fme_pr_mgmt_init - ifpga_feature_ops init hook for the FME PR feature.
 *
 * Reads the PR feature header revision to pick the PR data-path width:
 * revision 2 means 512-bit (64-byte) pushes, anything else 32-bit
 * (4-byte) pushes, stored in fme->pr_bandwidth for fme_pr_write().
 *
 * NOTE(review): the else keyword and the return statement are not
 * visible in this view of the file.
 */
323 static int fme_pr_mgmt_init(struct ifpga_feature *feature)
325 struct feature_fme_pr *fme_pr;
326 struct feature_header fme_pr_header;
327 struct ifpga_fme_hw *fme;
329 dev_info(NULL, "FME PR MGMT Init.\n");
331 fme = (struct ifpga_fme_hw *)feature->parent;
333 fme_pr = (struct feature_fme_pr *)feature->addr;
/* The header revision selects the PR push width. */
335 fme_pr_header.csr = readq(&fme_pr->header);
336 if (fme_pr_header.revision == 2) {
337 dev_info(NULL, "using 512-bit PR\n");
338 fme->pr_bandwidth = 64;
340 dev_info(NULL, "using 32-bit PR\n");
341 fme->pr_bandwidth = 4;
/*
 * fme_pr_mgmt_uinit - ifpga_feature_ops uinit hook; nothing to tear down,
 * it only logs. The unused `feature` parameter is presumably marked
 * __rte_unused on an elided line — TODO confirm.
 */
347 static void fme_pr_mgmt_uinit(struct ifpga_feature *feature)
351 dev_info(NULL, "FME PR MGMT UInit.\n");
354 struct ifpga_feature_ops fme_pr_mgmt_ops = {
355 .init = fme_pr_mgmt_init,
356 .uinit = fme_pr_mgmt_uinit,