common/cnxk: set BPHY IRQ handler
[dpdk.git] / drivers / common / cnxk / roc_bphy_irq.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 #include <fcntl.h>
5 #include <pthread.h>
6 #include <sys/ioctl.h>
7 #include <sys/mman.h>
8 #include <sys/queue.h>
9 #include <unistd.h>
10
11 #include "roc_api.h"
12 #include "roc_bphy_irq.h"
13
/* Portability alias: the ROC layer uses roc_cpuset_t; on Linux this is the
 * POSIX cpu_set_t used by the pthread affinity APIs below.
 */
#define roc_cpuset_t cpu_set_t

/* Payload handed to the otx-bphy-ctr kernel driver through the
 * ROC_BPHY_IOC_SET_BPHY_HANDLER ioctl. The field layout is kernel ABI —
 * do not reorder, resize or retype the members.
 */
struct roc_bphy_irq_usr_data {
        uint64_t isr_base; /* address of the user-space ISR trampoline */
        uint64_t sp;       /* stack pointer the ISR will run on */
        uint64_t cpu;      /* CPU the handler thread is pinned to */
        uint64_t irq_num;  /* BPHY interrupt line being registered */
};
22
/* Node of the file-static 'irq_stacks' list: one dedicated ISR stack per
 * CPU, shared (refcounted via 'inuse') by all handlers pinned to that CPU.
 */
struct roc_bphy_irq_stack {
        STAILQ_ENTRY(roc_bphy_irq_stack) entries; /* irq_stacks linkage */
        void *sp_buffer; /* backing allocation for the ISR stack */
        int cpu;         /* CPU this stack belongs to */
        int inuse;       /* reference count of handlers using the stack */
};
29
/* Memzone name under which roc_bphy_irq_handler_set() publishes the chip
 * pointer so roc_bphy_intr_handler() can find it at interrupt time.
 */
#define ROC_BPHY_MEMZONE_NAME "roc_bphy_mz"
/* Character device exposed by the otx-bphy-ctr kernel driver. */
#define ROC_BPHY_CTR_DEV_PATH "/dev/otx-bphy-ctr"

/* ioctl command set understood by the otx-bphy-ctr driver. */
#define ROC_BPHY_IOC_MAGIC 0xF3
#define ROC_BPHY_IOC_SET_BPHY_HANDLER                                          \
        _IOW(ROC_BPHY_IOC_MAGIC, 1, struct roc_bphy_irq_usr_data)
#define ROC_BPHY_IOC_GET_BPHY_MAX_IRQ   _IOR(ROC_BPHY_IOC_MAGIC, 3, uint64_t)
#define ROC_BPHY_IOC_GET_BPHY_BMASK_IRQ _IOR(ROC_BPHY_IOC_MAGIC, 4, uint64_t)

/* Per-CPU ISR stacks allocated so far; guarded by stacks_mutex. */
static STAILQ_HEAD(slisthead, roc_bphy_irq_stack)
        irq_stacks = STAILQ_HEAD_INITIALIZER(irq_stacks);

/* Note: it is assumed that as for now there is no multiprocess support */
static pthread_mutex_t stacks_mutex = PTHREAD_MUTEX_INITIALIZER;
44
45 struct roc_bphy_irq_chip *
46 roc_bphy_intr_init(void)
47 {
48         struct roc_bphy_irq_chip *irq_chip;
49         uint64_t max_irq, i, avail_irqs;
50         int fd, ret;
51
52         fd = open(ROC_BPHY_CTR_DEV_PATH, O_RDWR | O_SYNC);
53         if (fd < 0) {
54                 plt_err("Failed to open %s", ROC_BPHY_CTR_DEV_PATH);
55                 return NULL;
56         }
57
58         ret = ioctl(fd, ROC_BPHY_IOC_GET_BPHY_MAX_IRQ, &max_irq);
59         if (ret < 0) {
60                 plt_err("Failed to get max irq number via ioctl");
61                 goto err_ioctl;
62         }
63
64         ret = ioctl(fd, ROC_BPHY_IOC_GET_BPHY_BMASK_IRQ, &avail_irqs);
65         if (ret < 0) {
66                 plt_err("Failed to get available irqs bitmask via ioctl");
67                 goto err_ioctl;
68         }
69
70         irq_chip = plt_zmalloc(sizeof(*irq_chip), 0);
71         if (irq_chip == NULL) {
72                 plt_err("Failed to alloc irq_chip");
73                 goto err_alloc_chip;
74         }
75
76         irq_chip->intfd = fd;
77         irq_chip->max_irq = max_irq;
78         irq_chip->avail_irq_bmask = avail_irqs;
79         irq_chip->irq_vecs =
80                 plt_zmalloc(irq_chip->max_irq * sizeof(*irq_chip->irq_vecs), 0);
81         if (irq_chip->irq_vecs == NULL) {
82                 plt_err("Failed to alloc irq_chip irq_vecs");
83                 goto err_alloc_irq;
84         }
85
86         irq_chip->mz_name = plt_zmalloc(strlen(ROC_BPHY_MEMZONE_NAME) + 1, 0);
87         if (irq_chip->mz_name == NULL) {
88                 plt_err("Failed to alloc irq_chip name");
89                 goto err_alloc_name;
90         }
91         plt_strlcpy(irq_chip->mz_name, ROC_BPHY_MEMZONE_NAME,
92                     strlen(ROC_BPHY_MEMZONE_NAME) + 1);
93
94         for (i = 0; i < irq_chip->max_irq; i++) {
95                 irq_chip->irq_vecs[i].fd = -1;
96                 irq_chip->irq_vecs[i].handler_cpu = -1;
97         }
98
99         return irq_chip;
100
101 err_alloc_name:
102         plt_free(irq_chip->irq_vecs);
103
104 err_alloc_irq:
105         plt_free(irq_chip);
106
107 err_ioctl:
108 err_alloc_chip:
109         close(fd);
110         return NULL;
111 }
112
113 void
114 roc_bphy_intr_fini(struct roc_bphy_irq_chip *irq_chip)
115 {
116         if (irq_chip == NULL)
117                 return;
118
119         close(irq_chip->intfd);
120         plt_free(irq_chip->mz_name);
121         plt_free(irq_chip->irq_vecs);
122         plt_free(irq_chip);
123 }
124
125 void
126 roc_bphy_irq_stack_remove(int cpu)
127 {
128         struct roc_bphy_irq_stack *curr_stack;
129
130         if (pthread_mutex_lock(&stacks_mutex))
131                 return;
132
133         STAILQ_FOREACH(curr_stack, &irq_stacks, entries) {
134                 if (curr_stack->cpu == cpu)
135                         break;
136         }
137
138         if (curr_stack == NULL)
139                 goto leave;
140
141         if (curr_stack->inuse > 0)
142                 curr_stack->inuse--;
143
144         if (curr_stack->inuse == 0) {
145                 STAILQ_REMOVE(&irq_stacks, curr_stack, roc_bphy_irq_stack,
146                               entries);
147                 plt_free(curr_stack->sp_buffer);
148                 plt_free(curr_stack);
149         }
150
151 leave:
152         pthread_mutex_unlock(&stacks_mutex);
153 }
154
155 void *
156 roc_bphy_irq_stack_get(int cpu)
157 {
158 #define ARM_STACK_ALIGNMENT (2 * sizeof(void *))
159 #define IRQ_ISR_STACK_SIZE  0x200000
160
161         struct roc_bphy_irq_stack *curr_stack;
162         void *retval = NULL;
163
164         if (pthread_mutex_lock(&stacks_mutex))
165                 return NULL;
166
167         STAILQ_FOREACH(curr_stack, &irq_stacks, entries) {
168                 if (curr_stack->cpu == cpu) {
169                         curr_stack->inuse++;
170                         retval = ((char *)curr_stack->sp_buffer) +
171                                  IRQ_ISR_STACK_SIZE;
172                         goto found_stack;
173                 }
174         }
175
176         curr_stack = plt_zmalloc(sizeof(struct roc_bphy_irq_stack), 0);
177         if (curr_stack == NULL)
178                 goto err_stack;
179
180         curr_stack->sp_buffer =
181                 plt_zmalloc(IRQ_ISR_STACK_SIZE * 2, ARM_STACK_ALIGNMENT);
182         if (curr_stack->sp_buffer == NULL)
183                 goto err_buffer;
184
185         curr_stack->cpu = cpu;
186         curr_stack->inuse = 0;
187         STAILQ_INSERT_TAIL(&irq_stacks, curr_stack, entries);
188         retval = ((char *)curr_stack->sp_buffer) + IRQ_ISR_STACK_SIZE;
189
190 found_stack:
191         pthread_mutex_unlock(&stacks_mutex);
192         return retval;
193
194 err_buffer:
195         plt_free(curr_stack);
196
197 err_stack:
198         pthread_mutex_unlock(&stacks_mutex);
199         return NULL;
200 }
201
/*
 * ISR entry point for a BPHY interrupt line. Recovers the irq chip pointer
 * that roc_bphy_irq_handler_set() published in the ROC_BPHY_MEMZONE_NAME
 * memzone and dispatches to the handler registered for 'irq_num', then
 * returns control to firmware via roc_atf_ret().
 *
 * NOTE(review): irq_num is not bounds-checked against max_irq here — it is
 * trusted to come from the kernel driver; confirm that assumption holds.
 * NOTE(review): when the memzone or chip lookup fails this returns WITHOUT
 * calling roc_atf_ret() — verify that early return is intentional.
 */
void
roc_bphy_intr_handler(unsigned int irq_num)
{
	struct roc_bphy_irq_chip *irq_chip;
	const struct plt_memzone *mz;

	mz = plt_memzone_lookup(ROC_BPHY_MEMZONE_NAME);
	if (mz == NULL)
		return;

	irq_chip = *(struct roc_bphy_irq_chip **)mz->addr;
	if (irq_chip == NULL)
		return;

	/* Dispatch to the user-registered handler for this line, if any. */
	if (irq_chip->irq_vecs[irq_num].handler != NULL)
		irq_chip->irq_vecs[irq_num].handler(
			(int)irq_num, irq_chip->irq_vecs[irq_num].isr_data);

	roc_atf_ret();
}
222
223 int
224 roc_bphy_irq_handler_set(struct roc_bphy_irq_chip *chip, int irq_num,
225                          void (*isr)(int irq_num, void *isr_data),
226                          void *isr_data)
227 {
228         roc_cpuset_t orig_cpuset, intr_cpuset;
229         struct roc_bphy_irq_usr_data irq_usr;
230         const struct plt_memzone *mz;
231         int i, retval, curr_cpu, rc;
232         char *env;
233
234         mz = plt_memzone_lookup(chip->mz_name);
235         if (mz == NULL) {
236                 /* what we want is just a pointer to chip, not object itself */
237                 mz = plt_memzone_reserve_cache_align(chip->mz_name,
238                                                      sizeof(chip));
239                 if (mz == NULL)
240                         return -ENOMEM;
241         }
242
243         if (chip->irq_vecs[irq_num].handler != NULL)
244                 return -EINVAL;
245
246         rc = pthread_getaffinity_np(pthread_self(), sizeof(orig_cpuset),
247                                     &orig_cpuset);
248         if (rc < 0) {
249                 plt_err("Failed to get affinity mask");
250                 return rc;
251         }
252
253         for (curr_cpu = -1, i = 0; i < CPU_SETSIZE; i++)
254                 if (CPU_ISSET(i, &orig_cpuset))
255                         curr_cpu = i;
256         if (curr_cpu < 0)
257                 return -ENOENT;
258
259         CPU_ZERO(&intr_cpuset);
260         CPU_SET(curr_cpu, &intr_cpuset);
261         retval = pthread_setaffinity_np(pthread_self(), sizeof(intr_cpuset),
262                                         &intr_cpuset);
263         if (rc < 0) {
264                 plt_err("Failed to set affinity mask");
265                 return rc;
266         }
267
268         irq_usr.isr_base = (uint64_t)roc_bphy_intr_handler;
269         irq_usr.sp = (uint64_t)roc_bphy_irq_stack_get(curr_cpu);
270         irq_usr.cpu = curr_cpu;
271         if (irq_usr.sp == 0) {
272                 rc = pthread_setaffinity_np(pthread_self(), sizeof(orig_cpuset),
273                                             &orig_cpuset);
274                 if (rc < 0)
275                         plt_err("Failed to restore affinity mask");
276                 return rc;
277         }
278
279         /* On simulator memory locking operation takes much time. We want
280          * to skip this when running in such an environment.
281          */
282         env = getenv("BPHY_INTR_MLOCK_DISABLE");
283         if (env == NULL) {
284                 rc = mlockall(MCL_CURRENT | MCL_FUTURE);
285                 if (rc < 0)
286                         plt_warn("Failed to lock memory into RAM");
287         }
288
289         *((struct roc_bphy_irq_chip **)(mz->addr)) = chip;
290         irq_usr.irq_num = irq_num;
291         chip->irq_vecs[irq_num].handler_cpu = curr_cpu;
292         chip->irq_vecs[irq_num].handler = isr;
293         chip->irq_vecs[irq_num].isr_data = isr_data;
294         retval = ioctl(chip->intfd, ROC_BPHY_IOC_SET_BPHY_HANDLER, &irq_usr);
295         if (retval != 0) {
296                 roc_bphy_irq_stack_remove(curr_cpu);
297                 chip->irq_vecs[irq_num].handler = NULL;
298                 chip->irq_vecs[irq_num].handler_cpu = -1;
299         } else {
300                 chip->n_handlers++;
301         }
302
303         rc = pthread_setaffinity_np(pthread_self(), sizeof(orig_cpuset),
304                                     &orig_cpuset);
305         if (rc < 0)
306                 plt_warn("Failed to restore affinity mask");
307
308         return retval;
309 }
310
311 bool
312 roc_bphy_intr_available(struct roc_bphy_irq_chip *irq_chip, int irq_num)
313 {
314         if (irq_num < 0 || (uint64_t)irq_num >= irq_chip->max_irq)
315                 return false;
316
317         return irq_chip->avail_irq_bmask & BIT(irq_num);
318 }