common/cnxk: support tunnel header verification
[dpdk.git] / drivers / raw / ifpga / base / ifpga_feature_dev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2018 Intel Corporation
3  */
4
5 #include <sys/ioctl.h>
6 #include <rte_vfio.h>
7
8 #include "ifpga_feature_dev.h"
9
10 /*
11  * Enable Port by clear the port soft reset bit, which is set by default.
12  * The AFU is unable to respond to any MMIO access while in reset.
13  * __fpga_port_enable function should only be used after __fpga_port_disable
14  * function.
15  */
16 void __fpga_port_enable(struct ifpga_port_hw *port)
17 {
18         struct feature_port_header *port_hdr;
19         struct feature_port_control control;
20
21         WARN_ON(!port->disable_count);
22
23         if (--port->disable_count != 0)
24                 return;
25
26         port_hdr = get_port_feature_ioaddr_by_index(port,
27                                                     PORT_FEATURE_ID_HEADER);
28         WARN_ON(!port_hdr);
29
30         control.csr = readq(&port_hdr->control);
31         control.port_sftrst = 0x0;
32         writeq(control.csr, &port_hdr->control);
33 }
34
/*
 * Put the port into soft reset and wait for the hardware to acknowledge.
 * Refcounted: only the first of nested disable calls performs the reset;
 * each call must be balanced by a later __fpga_port_enable().
 *
 * Returns 0 on success, -ETIMEDOUT if the reset ack never arrives.
 */
int __fpga_port_disable(struct ifpga_port_hw *port)
{
	struct feature_port_header *port_hdr;
	struct feature_port_control control;

	/* Nested disable: reset was already asserted by the first caller. */
	if (port->disable_count++ != 0)
		return 0;

	port_hdr = get_port_feature_ioaddr_by_index(port,
						    PORT_FEATURE_ID_HEADER);
	WARN_ON(!port_hdr);

	/* Set port soft reset */
	control.csr = readq(&port_hdr->control);
	control.port_sftrst = 0x1;
	writeq(control.csr, &port_hdr->control);

	/*
	 * HW sets ack bit to 1 when all outstanding requests have been drained
	 * on this port and minimum soft reset pulse width has elapsed.
	 * Driver polls port_soft_reset_ack to determine if reset done by HW.
	 */
	control.port_sftrst_ack = 1;

	/* Poll the control CSR until the ack field matches, or time out. */
	if (fpga_wait_register_field(port_sftrst_ack, control,
				     &port_hdr->control, RST_POLL_TIMEOUT,
				     RST_POLL_INVL)) {
		dev_err(port, "timeout, fail to reset device\n");
		return -ETIMEDOUT;
	}

	return 0;
}
68
69 int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid)
70 {
71         struct feature_port_header *port_hdr;
72         u64 guidl, guidh;
73
74         if (!uuid)
75                 return -EINVAL;
76
77         port_hdr = get_port_feature_ioaddr_by_index(port, PORT_FEATURE_ID_UAFU);
78
79         spinlock_lock(&port->lock);
80         guidl = readq(&port_hdr->afu_header.guid.b[0]);
81         guidh = readq(&port_hdr->afu_header.guid.b[8]);
82         spinlock_unlock(&port->lock);
83
84         opae_memcpy(uuid->b, &guidl, sizeof(u64));
85         opae_memcpy(uuid->b + 8, &guidh, sizeof(u64));
86
87         return 0;
88 }
89
90 int fpga_get_pr_uuid(struct ifpga_fme_hw *fme, struct uuid *uuid)
91 {
92         struct feature_fme_pr *fme_pr;
93         u64 guidl, guidh;
94
95         if (!fme || !uuid)
96                 return -EINVAL;
97
98         fme_pr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_PR_MGMT);
99
100         spinlock_lock(&fme->lock);
101         guidl = readq(&fme_pr->fme_pr_intfc_id_l);
102         guidh = readq(&fme_pr->fme_pr_intfc_id_h);
103         spinlock_unlock(&fme->lock);
104
105         opae_memcpy(uuid->b, &guidl, sizeof(u64));
106         opae_memcpy(uuid->b + 8, &guidh, sizeof(u64));
107
108         return 0;
109 }
110
111 /* Mask / Unmask Port Errors by the Error Mask register. */
112 void port_err_mask(struct ifpga_port_hw *port, bool mask)
113 {
114         struct feature_port_error *port_err;
115         struct feature_port_err_key err_mask;
116
117         port_err = get_port_feature_ioaddr_by_index(port,
118                                                     PORT_FEATURE_ID_ERROR);
119
120         if (mask)
121                 err_mask.csr = PORT_ERR_MASK;
122         else
123                 err_mask.csr = 0;
124
125         writeq(err_mask.csr, &port_err->error_mask);
126 }
127
128 /* Clear All Port Errors. */
129 int port_err_clear(struct ifpga_port_hw *port, u64 err)
130 {
131         struct feature_port_header *port_hdr;
132         struct feature_port_error *port_err;
133         struct feature_port_err_key mask;
134         struct feature_port_first_err_key first;
135         struct feature_port_status status;
136         int ret = 0;
137
138         port_err = get_port_feature_ioaddr_by_index(port,
139                                                     PORT_FEATURE_ID_ERROR);
140         port_hdr = get_port_feature_ioaddr_by_index(port,
141                                                     PORT_FEATURE_ID_HEADER);
142
143         /*
144          * Clear All Port Errors
145          *
146          * - Check for AP6 State
147          * - Halt Port by keeping Port in reset
148          * - Set PORT Error mask to all 1 to mask errors
149          * - Clear all errors
150          * - Set Port mask to all 0 to enable errors
151          * - All errors start capturing new errors
152          * - Enable Port by pulling the port out of reset
153          */
154
155         /* If device is still in AP6 state, can not clear any error.*/
156         status.csr = readq(&port_hdr->status);
157         if (status.power_state == PORT_POWER_STATE_AP6) {
158                 dev_err(dev, "Could not clear errors, device in AP6 state.\n");
159                 return -EBUSY;
160         }
161
162         /* Halt Port by keeping Port in reset */
163         ret = __fpga_port_disable(port);
164         if (ret)
165                 return ret;
166
167         /* Mask all errors */
168         port_err_mask(port, true);
169
170         /* Clear errors if err input matches with current port errors.*/
171         mask.csr = readq(&port_err->port_error);
172
173         if (mask.csr == err) {
174                 writeq(mask.csr, &port_err->port_error);
175
176                 first.csr = readq(&port_err->port_first_error);
177                 writeq(first.csr, &port_err->port_first_error);
178         } else {
179                 ret = -EBUSY;
180         }
181
182         /* Clear mask */
183         port_err_mask(port, false);
184
185         /* Enable the Port by clear the reset */
186         __fpga_port_enable(port);
187
188         return ret;
189 }
190
191 int port_clear_error(struct ifpga_port_hw *port)
192 {
193         struct feature_port_error *port_err;
194         struct feature_port_err_key error;
195
196         port_err = get_port_feature_ioaddr_by_index(port,
197                                                     PORT_FEATURE_ID_ERROR);
198         error.csr = readq(&port_err->port_error);
199
200         dev_info(port, "read port error: 0x%lx\n", (unsigned long)error.csr);
201
202         return port_err_clear(port, error.csr);
203 }
204
205 static struct feature_driver fme_feature_drvs[] = {
206         {FEATURE_DRV(FME_FEATURE_ID_HEADER, FME_FEATURE_HEADER,
207                         &fme_hdr_ops),},
208         {FEATURE_DRV(FME_FEATURE_ID_THERMAL_MGMT, FME_FEATURE_THERMAL_MGMT,
209                         &fme_thermal_mgmt_ops),},
210         {FEATURE_DRV(FME_FEATURE_ID_POWER_MGMT, FME_FEATURE_POWER_MGMT,
211                         &fme_power_mgmt_ops),},
212         {FEATURE_DRV(FME_FEATURE_ID_GLOBAL_ERR, FME_FEATURE_GLOBAL_ERR,
213                         &fme_global_err_ops),},
214         {FEATURE_DRV(FME_FEATURE_ID_PR_MGMT, FME_FEATURE_PR_MGMT,
215                         &fme_pr_mgmt_ops),},
216         {FEATURE_DRV(FME_FEATURE_ID_GLOBAL_DPERF, FME_FEATURE_GLOBAL_DPERF,
217                         &fme_global_dperf_ops),},
218         {FEATURE_DRV(FME_FEATURE_ID_HSSI_ETH, FME_FEATURE_HSSI_ETH,
219         &fme_hssi_eth_ops),},
220         {FEATURE_DRV(FME_FEATURE_ID_EMIF_MGMT, FME_FEATURE_EMIF_MGMT,
221         &fme_emif_ops),},
222         {FEATURE_DRV(FME_FEATURE_ID_MAX10_SPI, FME_FEATURE_MAX10_SPI,
223         &fme_spi_master_ops),},
224         {FEATURE_DRV(FME_FEATURE_ID_NIOS_SPI, FME_FEATURE_NIOS_SPI,
225         &fme_nios_spi_master_ops),},
226         {FEATURE_DRV(FME_FEATURE_ID_I2C_MASTER, FME_FEATURE_I2C_MASTER,
227         &fme_i2c_master_ops),},
228         {FEATURE_DRV(FME_FEATURE_ID_ETH_GROUP, FME_FEATURE_ETH_GROUP,
229         &fme_eth_group_ops),},
230         {0, NULL, NULL}, /* end of arrary */
231 };
232
/* Drivers for port sub-features, matched against the feature id. */
static struct feature_driver port_feature_drvs[] = {
	{FEATURE_DRV(PORT_FEATURE_ID_HEADER, PORT_FEATURE_HEADER,
			&ifpga_rawdev_port_hdr_ops)},
	{FEATURE_DRV(PORT_FEATURE_ID_ERROR, PORT_FEATURE_ERR,
			&ifpga_rawdev_port_error_ops)},
	{FEATURE_DRV(PORT_FEATURE_ID_UINT, PORT_FEATURE_UINT,
			&ifpga_rawdev_port_uint_ops)},
	{FEATURE_DRV(PORT_FEATURE_ID_STP, PORT_FEATURE_STP,
			&ifpga_rawdev_port_stp_ops)},
	{FEATURE_DRV(PORT_FEATURE_ID_UAFU, PORT_FEATURE_UAFU,
			&ifpga_rawdev_port_afu_ops)},
	{0, NULL, NULL}, /* end of array */
};
246
247 const char *get_fme_feature_name(unsigned int id)
248 {
249         struct feature_driver *drv = fme_feature_drvs;
250
251         while (drv->name) {
252                 if (drv->id == id)
253                         return drv->name;
254
255                 drv++;
256         }
257
258         return NULL;
259 }
260
261 const char *get_port_feature_name(unsigned int id)
262 {
263         struct feature_driver *drv = port_feature_drvs;
264
265         while (drv->name) {
266                 if (drv->id == id)
267                         return drv->name;
268
269                 drv++;
270         }
271
272         return NULL;
273 }
274
275 static void feature_uinit(struct ifpga_feature_list *list)
276 {
277         struct ifpga_feature *feature;
278
279         TAILQ_FOREACH(feature, list, next) {
280                 if (feature->state != IFPGA_FEATURE_ATTACHED)
281                         continue;
282                 if (feature->ops && feature->ops->uinit)
283                         feature->ops->uinit(feature);
284         }
285 }
286
287 static int feature_init(struct feature_driver *drv,
288                 struct ifpga_feature_list *list)
289 {
290         struct ifpga_feature *feature;
291         int ret;
292
293         while (drv->ops) {
294                 TAILQ_FOREACH(feature, list, next) {
295                         if (feature->state != IFPGA_FEATURE_ATTACHED)
296                                 continue;
297                         if (feature->id == drv->id) {
298                                 feature->ops = drv->ops;
299                                 feature->name = drv->name;
300                                 if (feature->ops->init) {
301                                         ret = feature->ops->init(feature);
302                                         if (ret)
303                                                 goto error;
304                                 }
305                         }
306                 }
307                 drv++;
308         }
309
310         return 0;
311 error:
312         feature_uinit(list);
313         return ret;
314 }
315
316 int fme_hw_init(struct ifpga_fme_hw *fme)
317 {
318         int ret;
319
320         if (fme->state != IFPGA_FME_IMPLEMENTED)
321                 return -ENODEV;
322
323         ret = feature_init(fme_feature_drvs, &fme->feature_list);
324         if (ret)
325                 return ret;
326
327         return 0;
328 }
329
/* Tear down all attached FME features. */
void fme_hw_uinit(struct ifpga_fme_hw *fme)
{
	feature_uinit(&fme->feature_list);
}
334
/* Tear down all attached port features. */
void port_hw_uinit(struct ifpga_port_hw *port)
{
	feature_uinit(&port->feature_list);
}
339
340 int port_hw_init(struct ifpga_port_hw *port)
341 {
342         int ret;
343
344         if (port->state == IFPGA_PORT_UNUSED)
345                 return 0;
346
347         ret = feature_init(port_feature_drvs, &port->feature_list);
348         if (ret)
349                 goto error;
350
351         return 0;
352 error:
353         port_hw_uinit(port);
354         return ret;
355 }
356
357 #define FPGA_MAX_MSIX_VEC_COUNT 128
358 /* irq set buffer length for interrupt */
359 #define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
360                                 sizeof(int) * FPGA_MAX_MSIX_VEC_COUNT)
361
362 /* only support msix for now*/
363 static int vfio_msix_enable_block(s32 vfio_dev_fd, unsigned int vec_start,
364                                   unsigned int count, s32 *fds)
365 {
366         char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
367         struct vfio_irq_set *irq_set;
368         int len, ret;
369         int *fd_ptr;
370
371         len = sizeof(irq_set_buf);
372
373         irq_set = (struct vfio_irq_set *)irq_set_buf;
374         irq_set->argsz = len;
375         /* 0 < irq_set->count < FPGA_MAX_MSIX_VEC_COUNT */
376         irq_set->count = count ?
377                 (count > FPGA_MAX_MSIX_VEC_COUNT ?
378                  FPGA_MAX_MSIX_VEC_COUNT : count) : 1;
379         irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
380                                 VFIO_IRQ_SET_ACTION_TRIGGER;
381         irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
382         irq_set->start = vec_start;
383
384         fd_ptr = (int *)&irq_set->data;
385         opae_memcpy(fd_ptr, fds, sizeof(int) * count);
386
387         ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
388         if (ret)
389                 printf("Error enabling MSI-X interrupts\n");
390
391         return ret;
392 }
393
394 int fpga_msix_set_block(struct ifpga_feature *feature, unsigned int start,
395                         unsigned int count, s32 *fds)
396 {
397         struct feature_irq_ctx *ctx = feature->ctx;
398         unsigned int i;
399         int ret;
400
401         if (start >= feature->ctx_num || start + count > feature->ctx_num)
402                 return -EINVAL;
403
404         /* assume that each feature has continuous vector space in msix*/
405         ret = vfio_msix_enable_block(feature->vfio_dev_fd,
406                                      ctx[start].idx, count, fds);
407         if (!ret) {
408                 for (i = 0; i < count; i++)
409                         ctx[i].eventfd = fds[i];
410         }
411
412         return ret;
413 }