4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
59 struct contigmem_buffer {
65 struct contigmem_vm_handle {
69 static int contigmem_load(void);
70 static int contigmem_unload(void);
71 static int contigmem_physaddr(SYSCTL_HANDLER_ARGS);
73 static d_mmap_single_t contigmem_mmap_single;
74 static d_open_t contigmem_open;
75 static d_close_t contigmem_close;
77 static int contigmem_num_buffers = RTE_CONTIGMEM_DEFAULT_NUM_BUFS;
78 static int64_t contigmem_buffer_size = RTE_CONTIGMEM_DEFAULT_BUF_SIZE;
80 static eventhandler_tag contigmem_eh_tag;
81 static struct contigmem_buffer contigmem_buffers[RTE_CONTIGMEM_MAX_NUM_BUFS];
82 static struct cdev *contigmem_cdev = NULL;
83 static int contigmem_refcnt;
85 TUNABLE_INT("hw.contigmem.num_buffers", &contigmem_num_buffers);
86 TUNABLE_QUAD("hw.contigmem.buffer_size", &contigmem_buffer_size);
88 static SYSCTL_NODE(_hw, OID_AUTO, contigmem, CTLFLAG_RD, 0, "contigmem");
90 SYSCTL_INT(_hw_contigmem, OID_AUTO, num_buffers, CTLFLAG_RD,
91 &contigmem_num_buffers, 0, "Number of contigmem buffers allocated");
92 SYSCTL_QUAD(_hw_contigmem, OID_AUTO, buffer_size, CTLFLAG_RD,
93 &contigmem_buffer_size, 0, "Size of each contiguous buffer");
94 SYSCTL_INT(_hw_contigmem, OID_AUTO, num_references, CTLFLAG_RD,
95 &contigmem_refcnt, 0, "Number of references to contigmem");
97 static SYSCTL_NODE(_hw_contigmem, OID_AUTO, physaddr, CTLFLAG_RD, 0,
100 MALLOC_DEFINE(M_CONTIGMEM, "contigmem", "contigmem(4) allocations");
102 static int contigmem_modevent(module_t mod, int type, void *arg)
108 error = contigmem_load();
111 error = contigmem_unload();
120 moduledata_t contigmem_mod = {
122 (modeventhand_t)contigmem_modevent,
126 DECLARE_MODULE(contigmem, contigmem_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
127 MODULE_VERSION(contigmem, 1);
129 static struct cdevsw contigmem_ops = {
130 .d_name = "contigmem",
131 .d_version = D_VERSION,
132 .d_flags = D_TRACKCLOSE,
133 .d_mmap_single = contigmem_mmap_single,
134 .d_open = contigmem_open,
135 .d_close = contigmem_close,
141 char index_string[8], description[32];
145 if (contigmem_num_buffers > RTE_CONTIGMEM_MAX_NUM_BUFS) {
146 printf("%d buffers requested is greater than %d allowed\n",
147 contigmem_num_buffers, RTE_CONTIGMEM_MAX_NUM_BUFS);
152 if (contigmem_buffer_size < PAGE_SIZE ||
153 (contigmem_buffer_size & (contigmem_buffer_size - 1)) != 0) {
154 printf("buffer size 0x%lx is not greater than PAGE_SIZE and "
155 "power of two\n", contigmem_buffer_size);
160 for (i = 0; i < contigmem_num_buffers; i++) {
161 addr = contigmalloc(contigmem_buffer_size, M_CONTIGMEM, M_ZERO,
162 0, BUS_SPACE_MAXADDR, contigmem_buffer_size, 0);
164 printf("contigmalloc failed for buffer %d\n", i);
169 printf("%2u: virt=%p phys=%p\n", i, addr,
170 (void *)pmap_kextract((vm_offset_t)addr));
172 mtx_init(&contigmem_buffers[i].mtx, "contigmem", NULL, MTX_DEF);
173 contigmem_buffers[i].addr = addr;
174 contigmem_buffers[i].refcnt = 0;
176 snprintf(index_string, sizeof(index_string), "%d", i);
177 snprintf(description, sizeof(description),
178 "phys addr for buffer %d", i);
179 SYSCTL_ADD_PROC(NULL,
180 &SYSCTL_NODE_CHILDREN(_hw_contigmem, physaddr), OID_AUTO,
181 index_string, CTLTYPE_U64 | CTLFLAG_RD,
182 (void *)(uintptr_t)i, 0, contigmem_physaddr, "LU",
186 contigmem_cdev = make_dev_credf(0, &contigmem_ops, 0, NULL, UID_ROOT,
187 GID_WHEEL, 0600, "contigmem");
192 for (i = 0; i < contigmem_num_buffers; i++) {
193 if (contigmem_buffers[i].addr != NULL)
194 contigfree(contigmem_buffers[i].addr,
195 contigmem_buffer_size, M_CONTIGMEM);
196 if (mtx_initialized(&contigmem_buffers[i].mtx))
197 mtx_destroy(&contigmem_buffers[i].mtx);
208 if (contigmem_refcnt > 0)
211 if (contigmem_cdev != NULL)
212 destroy_dev(contigmem_cdev);
214 if (contigmem_eh_tag != NULL)
215 EVENTHANDLER_DEREGISTER(process_exit, contigmem_eh_tag);
217 for (i = 0; i < RTE_CONTIGMEM_MAX_NUM_BUFS; i++) {
218 if (contigmem_buffers[i].addr != NULL)
219 contigfree(contigmem_buffers[i].addr,
220 contigmem_buffer_size, M_CONTIGMEM);
221 if (mtx_initialized(&contigmem_buffers[i].mtx))
222 mtx_destroy(&contigmem_buffers[i].mtx);
229 contigmem_physaddr(SYSCTL_HANDLER_ARGS)
232 int index = (int)(uintptr_t)arg1;
234 physaddr = (uint64_t)vtophys(contigmem_buffers[index].addr);
235 return sysctl_handle_64(oidp, &physaddr, 0, req);
239 contigmem_open(struct cdev *cdev, int fflags, int devtype,
243 atomic_add_int(&contigmem_refcnt, 1);
249 contigmem_close(struct cdev *cdev, int fflags, int devtype,
253 atomic_subtract_int(&contigmem_refcnt, 1);
259 contigmem_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
260 vm_ooffset_t foff, struct ucred *cred, u_short *color)
262 struct contigmem_vm_handle *vmh = handle;
263 struct contigmem_buffer *buf;
265 buf = &contigmem_buffers[vmh->buffer_index];
267 atomic_add_int(&contigmem_refcnt, 1);
270 if (buf->refcnt == 0)
271 memset(buf->addr, 0, contigmem_buffer_size);
273 mtx_unlock(&buf->mtx);
279 contigmem_cdev_pager_dtor(void *handle)
281 struct contigmem_vm_handle *vmh = handle;
282 struct contigmem_buffer *buf;
284 buf = &contigmem_buffers[vmh->buffer_index];
288 mtx_unlock(&buf->mtx);
290 free(vmh, M_CONTIGMEM);
292 atomic_subtract_int(&contigmem_refcnt, 1);
296 contigmem_cdev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
300 vm_page_t m_paddr, page;
301 vm_memattr_t memattr, memattr1;
303 memattr = object->memattr;
305 VM_OBJECT_WUNLOCK(object);
309 m_paddr = vm_phys_paddr_to_vm_page(paddr);
310 if (m_paddr != NULL) {
311 memattr1 = pmap_page_get_memattr(m_paddr);
312 if (memattr1 != memattr)
316 if (((*mres)->flags & PG_FICTITIOUS) != 0) {
318 * If the passed in result page is a fake page, update it with
319 * the new physical address.
322 VM_OBJECT_WLOCK(object);
323 vm_page_updatefake(page, paddr, memattr);
327 * Replace the passed in reqpage page with our own fake page and
328 * free up the original page.
330 page = vm_page_getfake(paddr, memattr);
331 VM_OBJECT_WLOCK(object);
332 mret = vm_page_replace(page, object, (*mres)->pindex);
333 KASSERT(mret == *mres,
334 ("invalid page replacement, old=%p, ret=%p", *mres, mret));
337 vm_page_unlock(mret);
341 page->valid = VM_PAGE_BITS_ALL;
346 static struct cdev_pager_ops contigmem_cdev_pager_ops = {
347 .cdev_pg_ctor = contigmem_cdev_pager_ctor,
348 .cdev_pg_dtor = contigmem_cdev_pager_dtor,
349 .cdev_pg_fault = contigmem_cdev_pager_fault,
353 contigmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
354 struct vm_object **obj, int nprot)
356 struct contigmem_vm_handle *vmh;
357 uint64_t buffer_index;
360 * The buffer index is encoded in the offset. Divide the offset by
361 * PAGE_SIZE to get the index of the buffer requested by the user
364 buffer_index = *offset / PAGE_SIZE;
365 if (buffer_index >= contigmem_num_buffers)
368 if (size > contigmem_buffer_size)
371 vmh = malloc(sizeof(*vmh), M_CONTIGMEM, M_NOWAIT | M_ZERO);
374 vmh->buffer_index = buffer_index;
376 *offset = (vm_ooffset_t)vtophys(contigmem_buffers[buffer_index].addr);
377 *obj = cdev_pager_allocate(vmh, OBJT_DEVICE, &contigmem_cdev_pager_ops,
378 size, nprot, *offset, curthread->td_ucred);