/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/types.h>
#include <sys/ioctl.h>

#include <xen/xen-compat.h>
#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040200
#include <xs.h>
#else
#include <xenstore.h>
#endif
#include <xen/sys/gntalloc.h>

#include <rte_common.h>
#include <rte_string_fns.h>

#include "rte_xen_lib.h"
/*
 * The grant node format in xenstore for a vring/mempool is:
 * 0_rx_vring_gref = "gref1#, gref2#, gref3#"
 * 0_mempool_gref  = "gref1#, gref2#, gref3#"
 * Each gref# is a grant reference to a shared page.
 * Each shared page holds an array of grant_node_item entries.
 */
struct grant_node_item {
	uint32_t gref;	/* grant reference of a guest memory page */
	uint32_t pfn;	/* page frame number of that page, see grant_node_create() */
} __attribute__((packed));
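/*
 * The structure is packed so that both ends of a shared page agree on the
 * in-page layout byte for byte. As an illustration only (hypothetical helper
 * and variable names, not code from this driver), a peer that has mapped one
 * of the shared pages could walk the entries like this:
 *
 *	struct grant_node_item *item = mapped_page;
 *	unsigned int i, n = page_size / sizeof(*item);
 *
 *	for (i = 0; i < n; i++)
 *		consume_gref(item[i].gref, item[i].pfn);
 */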
/* fd for the xen_gntalloc driver, used to allocate grant pages */
int gntalloc_fd = -1;

/* xenstore path for the local domain, now it is '/local/domain/domid/' */
static char *dompath = NULL;
/* handle to xenstore read/write operations */
static struct xs_handle *xs = NULL;
/*
 * Reserve a virtual address space.
 * On success, returns the pointer. On failure, returns NULL.
 */
get_xen_virtual(size_t size, size_t page_sz)
	uintptr_t aligned_addr;

	addr = mmap(NULL, size + page_sz, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		RTE_LOG(ERR, PMD, "failed to get a virtual area\n");

	aligned_addr = RTE_ALIGN_CEIL((uintptr_t)addr, page_sz);
	addr = (void *)aligned_addr;
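/*
 * Note: the reservation above is size + page_sz bytes, so rounding the start
 * address up to page_sz still leaves at least 'size' usable bytes. A caller
 * would typically remap real backing memory over the reserved range, e.g.
 * (sketch only, assuming a gntalloc-style fd/offset pair, not code from this
 * file):
 *
 *	void *va = get_xen_virtual(len, pg_sz);
 *	if (va != NULL)
 *		va = mmap(va, len, PROT_READ | PROT_WRITE,
 *			MAP_SHARED | MAP_FIXED, gntalloc_fd, offset);
 */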
/*
 * Get the physical address for virtual memory starting at va.
 */
get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num, uint32_t pg_sz)
	ofs = (uintptr_t)va / pg_sz * sizeof(*pa);
	nb = pg_num * sizeof(*pa);

	if ((fd = open(PAGEMAP_FNAME, O_RDONLY)) < 0 ||
			(rc = pread(fd, pa, nb, ofs)) < 0 ||
		RTE_LOG(ERR, PMD, "%s: failed read of %u bytes from '%s' "
			"at offset %zu, error code: %d\n",
			__func__, nb, PAGEMAP_FNAME, ofs, errno);

	for (i = 0; i != pg_num; i++)
		pa[i] = (pa[i] & PAGEMAP_PFN_MASK) * pg_sz;
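/*
 * Note on the arithmetic above: /proc's pagemap file (PAGEMAP_FNAME) holds
 * one 64-bit record per virtual page, so the record describing 'va' sits at
 * byte offset (va / pg_sz) * sizeof(*pa), and pg_num consecutive records are
 * read with a single pread(). The low bits of each record hold the page
 * frame number (selected by PAGEMAP_PFN_MASK), hence pa[i] = pfn * pg_sz
 * yields a page-aligned physical address. The caller is expected to ensure
 * the pages are resident; for a non-present page the PFN field is not a
 * usable physical address.
 */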
	gntalloc_fd = open(XEN_GNTALLOC_FNAME, O_RDWR);
	return (gntalloc_fd != -1) ? 0 : -1;

	if (gntalloc_fd != -1)
gntalloc(size_t size, uint32_t *gref, uint64_t *start_index)
	int page_size = getpagesize();
	struct ioctl_gntalloc_alloc_gref *arg;
	struct ioctl_gntalloc_dealloc_gref arg_d;

	if (size % page_size) {
		RTE_LOG(ERR, PMD, "%s: %zu isn't a multiple of page size\n",

	pg_num = size / page_size;
	arg = malloc(sizeof(*arg) + (pg_num - 1) * sizeof(uint32_t));
	arg->domid = DOM0_DOMID;
	arg->flags = GNTALLOC_FLAG_WRITABLE;
	rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_ALLOC_GREF, arg);
		RTE_LOG(ERR, PMD, "%s: ioctl error\n", __func__);

	va = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, gntalloc_fd, arg->index);
	if (va == MAP_FAILED) {
		RTE_LOG(ERR, PMD, "%s: mmap failed\n", __func__);
		arg_d.count = pg_num;
		arg_d.index = arg->index;
		ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d);

	for (i = 0; i < pg_num; i++) {
		gref[i] = arg->gref_ids[i];

	*start_index = arg->index;
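/*
 * Usage sketch for gntalloc()/gntfree() (hypothetical caller, not code from
 * this file): allocate two pages granted to dom0, publish the grant
 * references, then release the pages when done.
 *
 *	uint32_t grefs[2];
 *	uint64_t idx;
 *	void *va = gntalloc(2 * getpagesize(), grefs, &idx);
 *
 *	if (va != NULL) {
 *		... share grefs[] with the peer and use the memory ...
 *		gntfree(va, 2 * getpagesize(), idx);
 *	}
 */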
grefwatch_from_alloc(uint32_t *gref, void **pptr)
	int pg_size = getpagesize();
	struct ioctl_gntalloc_alloc_gref arg = {
		.flags = GNTALLOC_FLAG_WRITABLE,
	struct ioctl_gntalloc_dealloc_gref arg_d;
	struct ioctl_gntalloc_unmap_notify notify = {
		.action = UNMAP_NOTIFY_CLEAR_BYTE

	rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_ALLOC_GREF, &arg);
		RTE_LOG(ERR, PMD, "%s: ioctl error\n", __func__);

	ptr = mmap(NULL, pg_size, PROT_READ|PROT_WRITE, MAP_SHARED, gntalloc_fd, arg.index);
	arg_d.index = arg.index;

	if (ptr == MAP_FAILED) {
		RTE_LOG(ERR, PMD, "%s: mmap failed\n", __func__);
		ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d);

	*gref = arg.gref_ids[0];

	notify.index = arg.index;
	rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_SET_UNMAP_NOTIFY, &notify);
		RTE_LOG(ERR, PMD, "%s: unmap notify failed\n", __func__);
		munmap(ptr, pg_size);
		ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d);
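/*
 * Note: UNMAP_NOTIFY_CLEAR_BYTE asks the gntalloc driver to zero the byte at
 * the given offset (notify.index, here the start of the granted page) once
 * this side unmaps the area, e.g. on process exit or crash, so a peer that
 * still maps the grant can detect the teardown and clean up.
 */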
gntfree(void *va, size_t sz, uint64_t start_index)
	struct ioctl_gntalloc_dealloc_gref arg_d;

	arg_d.count = sz / getpagesize();
	arg_d.index = start_index;
	ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d);
xenstore_cleanup(void)
	char store_path[PATH_MAX] = {0};

	if (snprintf(store_path, sizeof(store_path),
		"%s%s", dompath, DPDK_XENSTORE_NODE) == -1)

	if (xs_rm(xs, XBT_NULL, store_path) == false) {
		RTE_LOG(ERR, PMD, "%s: failed to clean up node\n", __func__);
	unsigned int len, domid;
	static int cleanup = 0;

	xs = xs_domain_open();
		RTE_LOG(ERR, PMD, "%s: xs_domain_open failed\n", __func__);
	buf = xs_read(xs, XBT_NULL, "domid", &len);
		RTE_LOG(ERR, PMD, "%s: failed to read domid\n", __func__);
	domid = strtoul(buf, &end, 0);
	if (errno != 0 || end == NULL || end == buf || domid == 0)

	RTE_LOG(INFO, PMD, "retrieved dom ID = %u\n", domid);

	dompath = xs_get_domain_path(xs, domid);

	xs_transaction_start(xs); /* When to stop transaction */

	if (xenstore_cleanup())
xenstore_write(const char *key_str, const char *val_str)
	char grant_path[PATH_MAX];

		RTE_LOG(ERR, PMD, "%s: xenstore init failed\n", __func__);
	rv = snprintf(grant_path, sizeof(grant_path), "%s%s", dompath, key_str);
		RTE_LOG(ERR, PMD, "%s: snprintf %s %s failed\n",
			__func__, dompath, key_str);

	len = strnlen(val_str, PATH_MAX);

	if (xs_write(xs, XBT_NULL, grant_path, val_str, len) == false) {
		RTE_LOG(ERR, PMD, "%s: xs_write failed\n", __func__);
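/*
 * Usage sketch (illustrative key and value only, following the node format
 * documented at the top of this file; real callers build the key from
 * DPDK_XENSTORE_PATH and an index, see grant_gntalloc_mbuf_pool() below):
 *
 *	xenstore_write("0_mempool_gref", "771,772,773");
 *
 * creates the node under the local domain path, i.e. something like
 * /local/domain/<domid>/0_mempool_gref.
 */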
grant_node_create(uint32_t pg_num, uint32_t *gref_arr, phys_addr_t *pa_arr, char *val_str, size_t str_size)
	uint64_t start_index;
	uint32_t count, entries_per_pg;
	uint32_t i, j = 0, k = 0;
	char tmp_str[PATH_MAX] = {0};

	pg_size = getpagesize();
	if (rte_is_power_of_2(pg_size) == 0) {
	pg_shift = rte_bsf32(pg_size);
	if (pg_size % sizeof(struct grant_node_item)) {
		RTE_LOG(ERR, PMD, "pg_size isn't a multiple of the grant node item size\n");

	entries_per_pg = pg_size / sizeof(struct grant_node_item);
	count = (pg_num + entries_per_pg - 1) / entries_per_pg;
	gref_tmp = malloc(count * sizeof(uint32_t));
	if (gref_tmp == NULL)
	ptr = gntalloc(pg_size * count, gref_tmp, &start_index);
		RTE_LOG(ERR, PMD, "%s: gntalloc of %u pages failed\n", __func__, count);

		rv = snprintf(val_str, str_size, "%u", gref_tmp[k]);

		snprintf(tmp_str, PATH_MAX, "%s", val_str);
		rv = snprintf(val_str, str_size, "%s,%u", tmp_str, gref_tmp[k]);

	for (i = 0; i < entries_per_pg && j < pg_num; i++) {
		((struct grant_node_item *)ptr)->gref = gref_arr[j];
		((struct grant_node_item *)ptr)->pfn = pa_arr[j] >> pg_shift;
		ptr = RTE_PTR_ADD(ptr, sizeof(struct grant_node_item));

	gntfree(ptr, pg_size * count, start_index);
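/*
 * Sketch of the outputs produced above: val_str receives the comma-separated
 * grant references of the 'count' metadata pages, e.g. "771,772", and each of
 * those pages is filled with up to entries_per_pg packed grant_node_item
 * records, one per guest memory page: { gref_arr[j], pa_arr[j] >> pg_shift }.
 */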
grant_gntalloc_mbuf_pool(struct rte_mempool *mpool, uint32_t pg_num, uint32_t *gref_arr, phys_addr_t *pa_arr, int mempool_idx)
	char key_str[PATH_MAX] = {0};
	char val_str[PATH_MAX] = {0};

	if (grant_node_create(pg_num, gref_arr, pa_arr, val_str, sizeof(val_str))) {

	if (snprintf(key_str, sizeof(key_str),
		DPDK_XENSTORE_PATH "%d" MEMPOOL_XENSTORE_STR, mempool_idx) == -1)
	if (xenstore_write(key_str, val_str) == -1)

	if (snprintf(key_str, sizeof(key_str),
		DPDK_XENSTORE_PATH "%d" MEMPOOL_VA_XENSTORE_STR, mempool_idx) == -1)
	if (snprintf(val_str, sizeof(val_str), "%" PRIxPTR, (uintptr_t)mpool->elt_va_start) == -1)
	if (xenstore_write(key_str, val_str) == -1)
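/*
 * Usage sketch (hypothetical caller and variable names, not code from this
 * file): after an mbuf pool backed by pg_num physical pages has been created
 * and its per-page grant references and physical addresses collected, a PMD
 * would export it with:
 *
 *	if (grant_gntalloc_mbuf_pool(mp, pg_num, gref_arr, pa_arr, 0) != 0)
 *		RTE_LOG(ERR, PMD, "failed to export mbuf pool via xenstore\n");
 */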