common/cnxk: do not switch affinity back and forth
dpdk.git: drivers/common/cnxk/roc_bphy_irq.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include <fcntl.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <unistd.h>

#include "roc_api.h"
#include "roc_bphy_irq.h"

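/* Data passed to the ROC_BPHY_IOC_SET_BPHY_HANDLER ioctl. */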
struct roc_bphy_irq_usr_data {
        uint64_t isr_base;
        uint64_t sp;
        uint64_t cpu;
        uint64_t irq_num;
};

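/* Per-CPU ISR stack, kept on the irq_stacks list and refcounted via inuse. */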
struct roc_bphy_irq_stack {
        STAILQ_ENTRY(roc_bphy_irq_stack) entries;
        void *sp_buffer;
        int cpu;
        int inuse;
};

#define ROC_BPHY_MEMZONE_NAME "roc_bphy_mz"
#define ROC_BPHY_CTR_DEV_PATH "/dev/otx-bphy-ctr"

#define ROC_BPHY_IOC_MAGIC 0xF3
#define ROC_BPHY_IOC_SET_BPHY_HANDLER                                          \
        _IOW(ROC_BPHY_IOC_MAGIC, 1, struct roc_bphy_irq_usr_data)
#define ROC_BPHY_IOC_CLR_BPHY_HANDLER   _IO(ROC_BPHY_IOC_MAGIC, 2)
#define ROC_BPHY_IOC_GET_BPHY_MAX_IRQ   _IOR(ROC_BPHY_IOC_MAGIC, 3, uint64_t)
#define ROC_BPHY_IOC_GET_BPHY_BMASK_IRQ _IOR(ROC_BPHY_IOC_MAGIC, 4, uint64_t)

static STAILQ_HEAD(slisthead, roc_bphy_irq_stack)
        irq_stacks = STAILQ_HEAD_INITIALIZER(irq_stacks);

/* Note: it is assumed that for now there is no multiprocess support */
static pthread_mutex_t stacks_mutex = PTHREAD_MUTEX_INITIALIZER;

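/*
 * Open the BPHY control device, query the maximum IRQ number and the
 * bitmask of available IRQs, and allocate the irq_chip descriptor.
 */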
struct roc_bphy_irq_chip *
roc_bphy_intr_init(void)
{
        struct roc_bphy_irq_chip *irq_chip;
        uint64_t max_irq, i, avail_irqs;
        int fd, ret;

        fd = open(ROC_BPHY_CTR_DEV_PATH, O_RDWR | O_SYNC);
        if (fd < 0) {
                plt_err("Failed to open %s", ROC_BPHY_CTR_DEV_PATH);
                return NULL;
        }

        ret = ioctl(fd, ROC_BPHY_IOC_GET_BPHY_MAX_IRQ, &max_irq);
        if (ret < 0) {
                plt_err("Failed to get max irq number via ioctl");
                goto err_ioctl;
        }

        ret = ioctl(fd, ROC_BPHY_IOC_GET_BPHY_BMASK_IRQ, &avail_irqs);
        if (ret < 0) {
                plt_err("Failed to get available irqs bitmask via ioctl");
                goto err_ioctl;
        }

        irq_chip = plt_zmalloc(sizeof(*irq_chip), 0);
        if (irq_chip == NULL) {
                plt_err("Failed to alloc irq_chip");
                goto err_alloc_chip;
        }

        irq_chip->intfd = fd;
        irq_chip->max_irq = max_irq;
        irq_chip->avail_irq_bmask = avail_irqs;
        irq_chip->irq_vecs =
                plt_zmalloc(irq_chip->max_irq * sizeof(*irq_chip->irq_vecs), 0);
        if (irq_chip->irq_vecs == NULL) {
                plt_err("Failed to alloc irq_chip irq_vecs");
                goto err_alloc_irq;
        }

        irq_chip->mz_name = plt_zmalloc(strlen(ROC_BPHY_MEMZONE_NAME) + 1, 0);
        if (irq_chip->mz_name == NULL) {
                plt_err("Failed to alloc irq_chip name");
                goto err_alloc_name;
        }
        plt_strlcpy(irq_chip->mz_name, ROC_BPHY_MEMZONE_NAME,
                    strlen(ROC_BPHY_MEMZONE_NAME) + 1);

        for (i = 0; i < irq_chip->max_irq; i++) {
                irq_chip->irq_vecs[i].fd = -1;
                irq_chip->irq_vecs[i].handler_cpu = -1;
        }

        return irq_chip;

err_alloc_name:
        plt_free(irq_chip->irq_vecs);

err_alloc_irq:
        plt_free(irq_chip);

err_ioctl:
err_alloc_chip:
        close(fd);
        return NULL;
}

void
roc_bphy_intr_fini(struct roc_bphy_irq_chip *irq_chip)
{
        if (irq_chip == NULL)
                return;

        close(irq_chip->intfd);
        plt_free(irq_chip->mz_name);
        plt_free(irq_chip->irq_vecs);
        plt_free(irq_chip);
}

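/* Drop a reference to the given CPU's ISR stack; free it once unused. */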
static void
roc_bphy_irq_stack_remove(int cpu)
{
        struct roc_bphy_irq_stack *curr_stack;

        if (pthread_mutex_lock(&stacks_mutex))
                return;

        STAILQ_FOREACH(curr_stack, &irq_stacks, entries) {
                if (curr_stack->cpu == cpu)
                        break;
        }

        if (curr_stack == NULL)
                goto leave;

        if (curr_stack->inuse > 0)
                curr_stack->inuse--;

        if (curr_stack->inuse == 0) {
                STAILQ_REMOVE(&irq_stacks, curr_stack, roc_bphy_irq_stack,
                              entries);
                plt_free(curr_stack->sp_buffer);
                plt_free(curr_stack);
        }

leave:
        pthread_mutex_unlock(&stacks_mutex);
}

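/*
 * Return the stack pointer to be used by the ISR on the given CPU,
 * allocating the backing buffer on first use.
 */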
static void *
roc_bphy_irq_stack_get(int cpu)
{
#define ARM_STACK_ALIGNMENT (2 * sizeof(void *))
#define IRQ_ISR_STACK_SIZE  0x200000

        struct roc_bphy_irq_stack *curr_stack;
        void *retval = NULL;

        if (pthread_mutex_lock(&stacks_mutex))
                return NULL;

        STAILQ_FOREACH(curr_stack, &irq_stacks, entries) {
                if (curr_stack->cpu == cpu) {
                        curr_stack->inuse++;
                        retval = ((char *)curr_stack->sp_buffer) +
                                 IRQ_ISR_STACK_SIZE;
                        goto found_stack;
                }
        }

        curr_stack = plt_zmalloc(sizeof(struct roc_bphy_irq_stack), 0);
        if (curr_stack == NULL)
                goto err_stack;

        curr_stack->sp_buffer =
                plt_zmalloc(IRQ_ISR_STACK_SIZE * 2, ARM_STACK_ALIGNMENT);
        if (curr_stack->sp_buffer == NULL)
                goto err_buffer;

        curr_stack->cpu = cpu;
        curr_stack->inuse = 0;
        STAILQ_INSERT_TAIL(&irq_stacks, curr_stack, entries);
        retval = ((char *)curr_stack->sp_buffer) + IRQ_ISR_STACK_SIZE;

found_stack:
        pthread_mutex_unlock(&stacks_mutex);
        return retval;

err_buffer:
        plt_free(curr_stack);

err_stack:
        pthread_mutex_unlock(&stacks_mutex);
        return NULL;
}

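/*
 * Common ISR entry point: look up the irq_chip pointer published in the
 * memzone and dispatch to the handler registered for irq_num.
 */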
void
roc_bphy_intr_handler(unsigned int irq_num)
{
        struct roc_bphy_irq_chip *irq_chip;
        const struct plt_memzone *mz;

        mz = plt_memzone_lookup(ROC_BPHY_MEMZONE_NAME);
        if (mz == NULL)
                return;

        irq_chip = *(struct roc_bphy_irq_chip **)mz->addr;
        if (irq_chip == NULL)
                return;

        if (irq_chip->irq_vecs[irq_num].handler != NULL)
                irq_chip->irq_vecs[irq_num].handler(
                        (int)irq_num, irq_chip->irq_vecs[irq_num].isr_data);

        roc_atf_ret();
}

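/*
 * Publish the chip pointer in the memzone, grab an ISR stack for the
 * requested CPU and register the handler with the BPHY control device.
 */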
static int
roc_bphy_irq_handler_set(struct roc_bphy_irq_chip *chip, int cpu, int irq_num,
                         void (*isr)(int irq_num, void *isr_data),
                         void *isr_data)
{
        struct roc_bphy_irq_usr_data irq_usr;
        const struct plt_memzone *mz;
        int retval, rc;
        char *env;

        mz = plt_memzone_lookup(chip->mz_name);
        if (mz == NULL) {
                /* we just need a pointer to the chip, not the whole object */
                mz = plt_memzone_reserve_cache_align(chip->mz_name,
                                                     sizeof(chip));
                if (mz == NULL)
                        return -ENOMEM;
        }

        if (chip->irq_vecs[irq_num].handler != NULL)
                return -EINVAL;

        irq_usr.isr_base = (uint64_t)roc_bphy_intr_handler;
        irq_usr.sp = (uint64_t)roc_bphy_irq_stack_get(cpu);
        irq_usr.cpu = cpu;
        if (irq_usr.sp == 0)
                return -ENOMEM;

        /* On the simulator the memory locking operation takes a long time.
         * We want to skip it when running in such an environment.
         */
        env = getenv("BPHY_INTR_MLOCK_DISABLE");
        if (env == NULL) {
                rc = mlockall(MCL_CURRENT | MCL_FUTURE);
                if (rc < 0)
                        plt_warn("Failed to lock memory into RAM");
        }

        *((struct roc_bphy_irq_chip **)(mz->addr)) = chip;
        irq_usr.irq_num = irq_num;
        chip->irq_vecs[irq_num].handler_cpu = cpu;
        chip->irq_vecs[irq_num].handler = isr;
        chip->irq_vecs[irq_num].isr_data = isr_data;
        retval = ioctl(chip->intfd, ROC_BPHY_IOC_SET_BPHY_HANDLER, &irq_usr);
        if (retval != 0) {
                roc_bphy_irq_stack_remove(cpu);
                chip->irq_vecs[irq_num].handler = NULL;
                chip->irq_vecs[irq_num].handler_cpu = -1;
        } else {
                chip->n_handlers++;
        }

        return retval;
}

bool
roc_bphy_intr_available(struct roc_bphy_irq_chip *irq_chip, int irq_num)
{
        if (irq_num < 0 || (uint64_t)irq_num >= irq_chip->max_irq)
                return false;

        return irq_chip->avail_irq_bmask & BIT(irq_num);
}

uint64_t
roc_bphy_intr_max_get(struct roc_bphy_irq_chip *irq_chip)
{
        return irq_chip->max_irq;
}

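/*
 * Unregister the handler for irq_num via ioctl, release its ISR stack
 * and free the memzone once the last handler is removed.
 */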
int
roc_bphy_intr_clear(struct roc_bphy_irq_chip *chip, int irq_num)
{
        const struct plt_memzone *mz;
        int retval;

        if (chip == NULL)
                return -EINVAL;
        if ((uint64_t)irq_num >= chip->max_irq || irq_num < 0)
                return -EINVAL;
        if (!roc_bphy_intr_available(chip, irq_num))
                return -ENOTSUP;
        if (chip->irq_vecs[irq_num].handler == NULL)
                return -EINVAL;
        mz = plt_memzone_lookup(chip->mz_name);
        if (mz == NULL)
                return -ENXIO;

        retval = ioctl(chip->intfd, ROC_BPHY_IOC_CLR_BPHY_HANDLER, irq_num);
        if (retval == 0) {
                roc_bphy_irq_stack_remove(chip->irq_vecs[irq_num].handler_cpu);
                chip->n_handlers--;
                chip->irq_vecs[irq_num].isr_data = NULL;
                chip->irq_vecs[irq_num].handler = NULL;
                chip->irq_vecs[irq_num].handler_cpu = -1;
                if (chip->n_handlers == 0) {
                        retval = plt_memzone_free(mz);
                        if (retval < 0)
                                plt_err("Failed to free memzone: irq %d",
                                        irq_num);
                }
        } else {
                plt_err("Failed to clear bphy interrupt handler");
        }

        return retval;
}

int
roc_bphy_intr_register(struct roc_bphy_irq_chip *irq_chip,
                       struct roc_bphy_intr *intr)
{
        int ret;

        if (!roc_bphy_intr_available(irq_chip, intr->irq_num))
                return -ENOTSUP;

        ret = roc_bphy_irq_handler_set(irq_chip, intr->cpu, intr->irq_num,
                                       intr->intr_handler, intr->isr_data);

        return ret;
}