/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/eventhandler.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>

struct contigmem_buffer {
	void           *addr;
	int             refcnt;
	struct mtx      mtx;
};

struct contigmem_vm_handle {
	int             buffer_index;
};

static int              contigmem_load(void);
static int              contigmem_unload(void);
static int              contigmem_physaddr(SYSCTL_HANDLER_ARGS);

static d_mmap_single_t  contigmem_mmap_single;
static d_open_t         contigmem_open;
static d_close_t        contigmem_close;

static int              contigmem_num_buffers = RTE_CONTIGMEM_DEFAULT_NUM_BUFS;
static int64_t          contigmem_buffer_size = RTE_CONTIGMEM_DEFAULT_BUF_SIZE;

static eventhandler_tag contigmem_eh_tag;
static struct contigmem_buffer contigmem_buffers[RTE_CONTIGMEM_MAX_NUM_BUFS];
static struct cdev     *contigmem_cdev = NULL;
static int              contigmem_refcnt;

TUNABLE_INT("hw.contigmem.num_buffers", &contigmem_num_buffers);
TUNABLE_QUAD("hw.contigmem.buffer_size", &contigmem_buffer_size);
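
/*
 * Both tunables are read once at module load, so they are normally set
 * from /boot/loader.conf. An illustrative (not default) configuration:
 *
 *   hw.contigmem.num_buffers=64
 *   hw.contigmem.buffer_size=2097152
 */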

static SYSCTL_NODE(_hw, OID_AUTO, contigmem, CTLFLAG_RD, 0, "contigmem");

SYSCTL_INT(_hw_contigmem, OID_AUTO, num_buffers, CTLFLAG_RD,
	&contigmem_num_buffers, 0, "Number of contigmem buffers allocated");
SYSCTL_QUAD(_hw_contigmem, OID_AUTO, buffer_size, CTLFLAG_RD,
	&contigmem_buffer_size, 0, "Size of each contiguous buffer");
SYSCTL_INT(_hw_contigmem, OID_AUTO, num_references, CTLFLAG_RD,
	&contigmem_refcnt, 0, "Number of references to contigmem");

static SYSCTL_NODE(_hw_contigmem, OID_AUTO, physaddr, CTLFLAG_RD, 0,
	"physaddr");

MALLOC_DEFINE(M_CONTIGMEM, "contigmem", "contigmem(4) allocations");
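
/*
 * Module event handler: dispatch load/unload events to the setup and
 * teardown routines below.
 */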
static int contigmem_modevent(module_t mod, int type, void *arg)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = contigmem_load();
		break;
	case MOD_UNLOAD:
		error = contigmem_unload();
		break;
	default:
		break;
	}

	return error;
}

moduledata_t contigmem_mod = {
	"contigmem",
	(modeventhand_t)contigmem_modevent,
	NULL
};

DECLARE_MODULE(contigmem, contigmem_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
MODULE_VERSION(contigmem, 1);
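
/*
 * D_TRACKCLOSE asks the kernel to call d_close for every close(2), not
 * just the last one, so the reference counting in contigmem_open() and
 * contigmem_close() stays balanced.
 */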
static struct cdevsw contigmem_ops = {
	.d_name = "contigmem",
	.d_version = D_VERSION,
	.d_flags = D_TRACKCLOSE,
	.d_mmap_single = contigmem_mmap_single,
	.d_open = contigmem_open,
	.d_close = contigmem_close,
};
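
/*
 * Allocate the configured number of physically contiguous, size-aligned
 * buffers up front and publish each buffer's physical address under the
 * hw.contigmem.physaddr sysctl tree.
 */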
static int
contigmem_load(void)
{
	char index_string[8], description[32];
	int  i, error = 0;
	void *addr;

	if (contigmem_num_buffers > RTE_CONTIGMEM_MAX_NUM_BUFS) {
		printf("%d buffers requested, but at most %d are allowed\n",
				contigmem_num_buffers, RTE_CONTIGMEM_MAX_NUM_BUFS);
		error = EINVAL;
		goto error;
	}

	if (contigmem_buffer_size < PAGE_SIZE ||
			(contigmem_buffer_size & (contigmem_buffer_size - 1)) != 0) {
		printf("buffer size 0x%lx must be at least PAGE_SIZE and "
				"a power of two\n", contigmem_buffer_size);
		error = EINVAL;
		goto error;
	}

	for (i = 0; i < contigmem_num_buffers; i++) {
		addr = contigmalloc(contigmem_buffer_size, M_CONTIGMEM, M_ZERO,
			0, BUS_SPACE_MAXADDR, contigmem_buffer_size, 0);
		if (addr == NULL) {
			printf("contigmalloc failed for buffer %d\n", i);
			error = ENOMEM;
			goto error;
		}

		printf("%2d: virt=%p phys=%p\n", i, addr,
			(void *)pmap_kextract((vm_offset_t)addr));

		mtx_init(&contigmem_buffers[i].mtx, "contigmem", NULL, MTX_DEF);
		contigmem_buffers[i].addr = addr;
		contigmem_buffers[i].refcnt = 0;

		snprintf(index_string, sizeof(index_string), "%d", i);
		snprintf(description, sizeof(description),
				"phys addr for buffer %d", i);
		SYSCTL_ADD_PROC(NULL,
				&SYSCTL_NODE_CHILDREN(_hw_contigmem, physaddr), OID_AUTO,
				index_string, CTLTYPE_U64 | CTLFLAG_RD,
				(void *)(uintptr_t)i, 0, contigmem_physaddr, "LU",
				description);
	}

	contigmem_cdev = make_dev_credf(0, &contigmem_ops, 0, NULL, UID_ROOT,
			GID_WHEEL, 0600, "contigmem");

	return 0;

error:
	for (i = 0; i < contigmem_num_buffers; i++) {
		if (contigmem_buffers[i].addr != NULL) {
			contigfree(contigmem_buffers[i].addr,
					contigmem_buffer_size, M_CONTIGMEM);
			contigmem_buffers[i].addr = NULL;
		}
		if (mtx_initialized(&contigmem_buffers[i].mtx))
			mtx_destroy(&contigmem_buffers[i].mtx);
	}

	return error;
}
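
/*
 * Refuse to unload while any mapping still holds a reference; otherwise
 * destroy the device node and release every buffer and its mutex.
 */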
static int
contigmem_unload(void)
{
	int i;

	if (contigmem_refcnt > 0)
		return EBUSY;

	if (contigmem_cdev != NULL)
		destroy_dev(contigmem_cdev);

	if (contigmem_eh_tag != NULL)
		EVENTHANDLER_DEREGISTER(process_exit, contigmem_eh_tag);

	for (i = 0; i < RTE_CONTIGMEM_MAX_NUM_BUFS; i++) {
		if (contigmem_buffers[i].addr != NULL)
			contigfree(contigmem_buffers[i].addr,
					contigmem_buffer_size, M_CONTIGMEM);
		if (mtx_initialized(&contigmem_buffers[i].mtx))
			mtx_destroy(&contigmem_buffers[i].mtx);
	}

	return 0;
}
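
/*
 * Sysctl handler returning a buffer's physical address; e.g. (a usage
 * sketch) buffer 0 can be queried from userland with
 * `sysctl hw.contigmem.physaddr.0`.
 */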
static int
contigmem_physaddr(SYSCTL_HANDLER_ARGS)
{
	uint64_t physaddr;
	int index = (int)(uintptr_t)arg1;

	physaddr = (uint64_t)vtophys(contigmem_buffers[index].addr);
	return sysctl_handle_64(oidp, &physaddr, 0, req);
}
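
/*
 * Open and close only maintain the module-wide reference count that keeps
 * the module from being unloaded while the device is in use.
 */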
static int
contigmem_open(struct cdev *cdev, int fflags, int devtype,
		struct thread *td)
{
	atomic_add_int(&contigmem_refcnt, 1);

	return 0;
}

static int
contigmem_close(struct cdev *cdev, int fflags, int devtype,
		struct thread *td)
{
	atomic_subtract_int(&contigmem_refcnt, 1);

	return 0;
}
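
/*
 * Device pager constructor, run when a mapping of a buffer is created.
 * The buffer is zeroed when its first mapping appears so no stale data
 * leaks between unrelated users.
 */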
static int
contigmem_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
		vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct contigmem_vm_handle *vmh = handle;
	struct contigmem_buffer *buf;

	buf = &contigmem_buffers[vmh->buffer_index];

	atomic_add_int(&contigmem_refcnt, 1);

	mtx_lock(&buf->mtx);
	if (buf->refcnt == 0)
		memset(buf->addr, 0, contigmem_buffer_size);
	buf->refcnt++;
	mtx_unlock(&buf->mtx);

	return 0;
}
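
/*
 * Device pager destructor: drop the per-buffer and module-wide references
 * taken by the constructor and free the per-mapping handle.
 */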
static void
contigmem_cdev_pager_dtor(void *handle)
{
	struct contigmem_vm_handle *vmh = handle;
	struct contigmem_buffer *buf;

	buf = &contigmem_buffers[vmh->buffer_index];

	mtx_lock(&buf->mtx);
	buf->refcnt--;
	mtx_unlock(&buf->mtx);

	free(vmh, M_CONTIGMEM);

	atomic_subtract_int(&contigmem_refcnt, 1);
}
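
/*
 * Page fault handler: resolve a fault in a mapped buffer to a fictitious
 * page backed by the buffer's physical memory, honoring any memory
 * attributes already established for that physical page.
 */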
static int
contigmem_cdev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
		vm_page_t *mres)
{
	vm_paddr_t paddr;
	vm_page_t m_paddr, page;
	vm_memattr_t memattr, memattr1;

	memattr = object->memattr;
	VM_OBJECT_WUNLOCK(object);

	paddr = offset;

	m_paddr = vm_phys_paddr_to_vm_page(paddr);
	if (m_paddr != NULL) {
		memattr1 = pmap_page_get_memattr(m_paddr);
		if (memattr1 != memattr)
			memattr = memattr1;
	}

	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
		/*
		 * If the passed in result page is a fake page, update it with
		 * the new physical address.
		 */
		page = *mres;
		VM_OBJECT_WLOCK(object);
		vm_page_updatefake(page, paddr, memattr);
	} else {
		/*
		 * Replace the passed in reqpage page with our own fake page
		 * and free up the original page.
		 */
		page = vm_page_getfake(paddr, memattr);
		VM_OBJECT_WLOCK(object);
#if __FreeBSD__ >= 13
		vm_page_replace(page, object, (*mres)->pindex, *mres);
#else
		vm_page_t mret = vm_page_replace(page, object, (*mres)->pindex);
		KASSERT(mret == *mres,
			("invalid page replacement, old=%p, ret=%p", *mres, mret));
		vm_page_lock(mret);
		vm_page_free(mret);
		vm_page_unlock(mret);
#endif
		*mres = page;
	}

	page->valid = VM_PAGE_BITS_ALL;

	return VM_PAGER_OK;
}

static struct cdev_pager_ops contigmem_cdev_pager_ops = {
	.cdev_pg_ctor = contigmem_cdev_pager_ctor,
	.cdev_pg_dtor = contigmem_cdev_pager_dtor,
	.cdev_pg_fault = contigmem_cdev_pager_fault,
};
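
/*
 * mmap() entry point: select the target buffer from the file offset and
 * hand back a device pager object driven by the handlers above.
 */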
static int
contigmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
		struct vm_object **obj, int nprot)
{
	struct contigmem_vm_handle *vmh;
	uint64_t buffer_index;

	/*
	 * The buffer index is encoded in the offset. Divide the offset by
	 * PAGE_SIZE to get the index of the buffer requested by the user
	 * app.
	 */
	buffer_index = *offset / PAGE_SIZE;
	if (buffer_index >= contigmem_num_buffers)
		return EINVAL;

	if (size > contigmem_buffer_size)
		return EINVAL;

	vmh = malloc(sizeof(*vmh), M_CONTIGMEM, M_NOWAIT | M_ZERO);
	if (vmh == NULL)
		return ENOMEM;
	vmh->buffer_index = buffer_index;

	*offset = (vm_ooffset_t)vtophys(contigmem_buffers[buffer_index].addr);
	*obj = cdev_pager_allocate(vmh, OBJT_DEVICE, &contigmem_cdev_pager_ops,
			size, nprot, *offset, curthread->td_ucred);

	return 0;
}
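
/*
 * Userland usage sketch (illustrative, not part of this module): buffer i
 * is selected by passing i * PAGE_SIZE as the mmap() offset, so mapping
 * buffer 2 might look like
 *
 *   int fd = open("/dev/contigmem", O_RDWR);
 *   void *va = mmap(NULL, buffer_size, PROT_READ | PROT_WRITE,
 *           MAP_SHARED, fd, 2 * PAGE_SIZE);
 *
 * where buffer_size matches hw.contigmem.buffer_size.
 */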