lib/librte_eal/bsdapp/contigmem/contigmem.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>

struct contigmem_buffer {
        void           *addr;
        int             refcnt;
        struct mtx      mtx;
};

struct contigmem_vm_handle {
        int             buffer_index;
};

static int              contigmem_load(void);
static int              contigmem_unload(void);
static int              contigmem_physaddr(SYSCTL_HANDLER_ARGS);

static d_mmap_single_t  contigmem_mmap_single;
static d_open_t         contigmem_open;
static d_close_t        contigmem_close;

static int              contigmem_num_buffers = RTE_CONTIGMEM_DEFAULT_NUM_BUFS;
static int64_t          contigmem_buffer_size = RTE_CONTIGMEM_DEFAULT_BUF_SIZE;

static eventhandler_tag contigmem_eh_tag;
static struct contigmem_buffer contigmem_buffers[RTE_CONTIGMEM_MAX_NUM_BUFS];
static struct cdev     *contigmem_cdev = NULL;
static int              contigmem_refcnt;

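/*
 * hw.contigmem.num_buffers and hw.contigmem.buffer_size are boot-time
 * tunables; they are typically set in /boot/loader.conf before the module is
 * loaded, for example (illustrative values only):
 *
 *      hw.contigmem.num_buffers=2
 *      hw.contigmem.buffer_size=1073741824
 */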
TUNABLE_INT("hw.contigmem.num_buffers", &contigmem_num_buffers);
TUNABLE_QUAD("hw.contigmem.buffer_size", &contigmem_buffer_size);

static SYSCTL_NODE(_hw, OID_AUTO, contigmem, CTLFLAG_RD, 0, "contigmem");

SYSCTL_INT(_hw_contigmem, OID_AUTO, num_buffers, CTLFLAG_RD,
        &contigmem_num_buffers, 0, "Number of contigmem buffers allocated");
SYSCTL_QUAD(_hw_contigmem, OID_AUTO, buffer_size, CTLFLAG_RD,
        &contigmem_buffer_size, 0, "Size of each contiguous buffer");
SYSCTL_INT(_hw_contigmem, OID_AUTO, num_references, CTLFLAG_RD,
        &contigmem_refcnt, 0, "Number of references to contigmem");

static SYSCTL_NODE(_hw_contigmem, OID_AUTO, physaddr, CTLFLAG_RD, 0,
        "physaddr");

MALLOC_DEFINE(M_CONTIGMEM, "contigmem", "contigmem(4) allocations");

static int contigmem_modevent(module_t mod, int type, void *arg)
{
        int error = 0;

        switch (type) {
        case MOD_LOAD:
                error = contigmem_load();
                break;
        case MOD_UNLOAD:
                error = contigmem_unload();
                break;
        default:
                break;
        }

        return error;
}

moduledata_t contigmem_mod = {
        "contigmem",
        (modeventhand_t)contigmem_modevent,
        0
};

DECLARE_MODULE(contigmem, contigmem_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
MODULE_VERSION(contigmem, 1);

static struct cdevsw contigmem_ops = {
        .d_name         = "contigmem",
        .d_version      = D_VERSION,
        .d_flags        = D_TRACKCLOSE,
        .d_mmap_single  = contigmem_mmap_single,
        .d_open         = contigmem_open,
        .d_close        = contigmem_close,
};

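/*
 * Allocate contigmem_num_buffers physically contiguous buffers, each
 * contigmem_buffer_size bytes long and aligned to its own size, publish the
 * physical address of each one under hw.contigmem.physaddr.<index>, and
 * create the /dev/contigmem character device.
 */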
static int
contigmem_load(void)
{
        char index_string[8], description[32];
        int  i, error = 0;
        void *addr;

        if (contigmem_num_buffers > RTE_CONTIGMEM_MAX_NUM_BUFS) {
                printf("%d buffers requested is greater than %d allowed\n",
                                contigmem_num_buffers, RTE_CONTIGMEM_MAX_NUM_BUFS);
                error = EINVAL;
                goto error;
        }

        if (contigmem_buffer_size < PAGE_SIZE ||
                        (contigmem_buffer_size & (contigmem_buffer_size - 1)) != 0) {
                printf("buffer size 0x%lx must be a power of two and at least PAGE_SIZE\n",
                                contigmem_buffer_size);
                error = EINVAL;
                goto error;
        }

        for (i = 0; i < contigmem_num_buffers; i++) {
                addr = contigmalloc(contigmem_buffer_size, M_CONTIGMEM, M_ZERO,
                        0, BUS_SPACE_MAXADDR, contigmem_buffer_size, 0);
                if (addr == NULL) {
                        printf("contigmalloc failed for buffer %d\n", i);
                        error = ENOMEM;
                        goto error;
                }

                printf("%2u: virt=%p phys=%p\n", i, addr,
                        (void *)pmap_kextract((vm_offset_t)addr));

                mtx_init(&contigmem_buffers[i].mtx, "contigmem", NULL, MTX_DEF);
                contigmem_buffers[i].addr = addr;
                contigmem_buffers[i].refcnt = 0;

                snprintf(index_string, sizeof(index_string), "%d", i);
                snprintf(description, sizeof(description),
                                "phys addr for buffer %d", i);
                SYSCTL_ADD_PROC(NULL,
                                &SYSCTL_NODE_CHILDREN(_hw_contigmem, physaddr), OID_AUTO,
                                index_string, CTLTYPE_U64 | CTLFLAG_RD,
                                (void *)(uintptr_t)i, 0, contigmem_physaddr, "LU",
                                description);
        }

        contigmem_cdev = make_dev_credf(0, &contigmem_ops, 0, NULL, UID_ROOT,
                        GID_WHEEL, 0600, "contigmem");

        return 0;

error:
        /*
         * contigmem_num_buffers may exceed the size of the static buffer
         * array when the validation above failed, so bound the cleanup loop
         * by RTE_CONTIGMEM_MAX_NUM_BUFS rather than by the requested count.
         */
        for (i = 0; i < RTE_CONTIGMEM_MAX_NUM_BUFS; i++) {
                if (contigmem_buffers[i].addr != NULL)
                        contigfree(contigmem_buffers[i].addr,
                                contigmem_buffer_size, M_CONTIGMEM);
                if (mtx_initialized(&contigmem_buffers[i].mtx))
                        mtx_destroy(&contigmem_buffers[i].mtx);
        }

        return error;
}

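/*
 * Refuse to unload while the device is still referenced; otherwise tear down
 * the device node and free every buffer that was allocated.
 */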
static int
contigmem_unload(void)
{
        int i;

        if (contigmem_refcnt > 0)
                return EBUSY;

        if (contigmem_cdev != NULL)
                destroy_dev(contigmem_cdev);

        if (contigmem_eh_tag != NULL)
                EVENTHANDLER_DEREGISTER(process_exit, contigmem_eh_tag);

        for (i = 0; i < RTE_CONTIGMEM_MAX_NUM_BUFS; i++) {
                if (contigmem_buffers[i].addr != NULL)
                        contigfree(contigmem_buffers[i].addr,
                                contigmem_buffer_size, M_CONTIGMEM);
                if (mtx_initialized(&contigmem_buffers[i].mtx))
                        mtx_destroy(&contigmem_buffers[i].mtx);
        }

        return 0;
}

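/*
 * Sysctl handler backing hw.contigmem.physaddr.<index>: reports the physical
 * address of the corresponding buffer, e.g. (illustrative):
 *
 *      $ sysctl hw.contigmem.physaddr.0
 */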
static int
contigmem_physaddr(SYSCTL_HANDLER_ARGS)
{
        uint64_t        physaddr;
        int             index = (int)(uintptr_t)arg1;

        physaddr = (uint64_t)vtophys(contigmem_buffers[index].addr);
        return sysctl_handle_64(oidp, &physaddr, 0, req);
}

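/*
 * The cdevsw sets D_TRACKCLOSE, so d_close is called for every close() of
 * the device; open and close therefore maintain an accurate reference count
 * that contigmem_unload() checks before allowing the module to be unloaded.
 */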
static int
contigmem_open(struct cdev *cdev, int fflags, int devtype,
                struct thread *td)
{

        atomic_add_int(&contigmem_refcnt, 1);

        return 0;
}

static int
contigmem_close(struct cdev *cdev, int fflags, int devtype,
                struct thread *td)
{

        atomic_subtract_int(&contigmem_refcnt, 1);

        return 0;
}

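/*
 * Device pager constructor and destructor.  The constructor zeroes a buffer
 * on its first mapping and bumps both the per-buffer and the module-wide
 * reference counts; the destructor drops them again and releases the
 * per-mapping handle.
 */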
static int
contigmem_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
                vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
        struct contigmem_vm_handle *vmh = handle;
        struct contigmem_buffer *buf;

        buf = &contigmem_buffers[vmh->buffer_index];

        atomic_add_int(&contigmem_refcnt, 1);

        mtx_lock(&buf->mtx);
        if (buf->refcnt == 0)
                memset(buf->addr, 0, contigmem_buffer_size);
        buf->refcnt++;
        mtx_unlock(&buf->mtx);

        return 0;
}

static void
contigmem_cdev_pager_dtor(void *handle)
{
        struct contigmem_vm_handle *vmh = handle;
        struct contigmem_buffer *buf;

        buf = &contigmem_buffers[vmh->buffer_index];

        mtx_lock(&buf->mtx);
        buf->refcnt--;
        mtx_unlock(&buf->mtx);

        free(vmh, M_CONTIGMEM);

        atomic_subtract_int(&contigmem_refcnt, 1);
}

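/*
 * Device pager fault handler.  The faulting offset is already a physical
 * address (contigmem_mmap_single rewrites the mapping offset to the buffer's
 * physical address), so the handler returns a fictitious page pointing at
 * that address, either by updating the fake page passed in or by replacing
 * the original page with a freshly allocated fake one.
 */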
static int
contigmem_cdev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
                vm_page_t *mres)
{
        vm_paddr_t paddr;
        vm_page_t m_paddr, page;
        vm_memattr_t memattr, memattr1;

        memattr = object->memattr;

        VM_OBJECT_WUNLOCK(object);

        paddr = offset;

        m_paddr = vm_phys_paddr_to_vm_page(paddr);
        if (m_paddr != NULL) {
                memattr1 = pmap_page_get_memattr(m_paddr);
                if (memattr1 != memattr)
                        memattr = memattr1;
        }

        if (((*mres)->flags & PG_FICTITIOUS) != 0) {
                /*
                 * If the passed in result page is a fake page, update it with
                 * the new physical address.
                 */
                page = *mres;
                VM_OBJECT_WLOCK(object);
                vm_page_updatefake(page, paddr, memattr);
        } else {
                vm_page_t mret;
                /*
                 * Replace the passed in reqpage page with our own fake page
                 * and free up the original page.
                 */
                page = vm_page_getfake(paddr, memattr);
                VM_OBJECT_WLOCK(object);
                mret = vm_page_replace(page, object, (*mres)->pindex);
                KASSERT(mret == *mres,
                    ("invalid page replacement, old=%p, ret=%p", *mres, mret));
                vm_page_lock(mret);
                vm_page_free(mret);
                vm_page_unlock(mret);
                *mres = page;
        }

        page->valid = VM_PAGE_BITS_ALL;

        return VM_PAGER_OK;
}

static struct cdev_pager_ops contigmem_cdev_pager_ops = {
        .cdev_pg_ctor = contigmem_cdev_pager_ctor,
        .cdev_pg_dtor = contigmem_cdev_pager_dtor,
        .cdev_pg_fault = contigmem_cdev_pager_fault,
};

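/*
 * d_mmap_single handler.  Userspace selects a buffer by mapping the device
 * at file offset buffer_index * PAGE_SIZE; the handler rewrites the offset
 * to the buffer's physical address and returns a device-pager object backed
 * by contigmem_cdev_pager_ops.
 *
 * Minimal userspace sketch (illustrative only; "buf_size" stands for the
 * configured hw.contigmem.buffer_size and "index" for the buffer number):
 *
 *      int fd = open("/dev/contigmem", O_RDWR);
 *      void *va = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *          fd, index * PAGE_SIZE);
 */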
static int
contigmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
                struct vm_object **obj, int nprot)
{
        struct contigmem_vm_handle *vmh;
        uint64_t buffer_index;

        /*
         * The buffer index is encoded in the offset.  Divide the offset by
         * PAGE_SIZE to get the index of the buffer requested by the user
         * app.
         */
        buffer_index = *offset / PAGE_SIZE;
        if (buffer_index >= contigmem_num_buffers)
                return EINVAL;

        if (size > contigmem_buffer_size)
                return EINVAL;

        vmh = malloc(sizeof(*vmh), M_CONTIGMEM, M_NOWAIT | M_ZERO);
        if (vmh == NULL)
                return ENOMEM;
        vmh->buffer_index = buffer_index;

        *offset = (vm_ooffset_t)vtophys(contigmem_buffers[buffer_index].addr);
        *obj = cdev_pager_allocate(vmh, OBJT_DEVICE, &contigmem_cdev_pager_ops,
                        size, nprot, *offset, curthread->td_ucred);

        return 0;
}