remove trailing whitespaces
[dpdk.git] / lib / librte_ivshmem / rte_ivshmem.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_memory.h>
#include <rte_string_fns.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#include "rte_ivshmem.h"

#define IVSHMEM_CONFIG_FILE_FMT "/var/run/.dpdk_ivshmem_metadata_%s"
#define IVSHMEM_QEMU_CMD_LINE_HEADER_FMT "-device ivshmem,size=%" PRIu64 "M,shm=fd%s"
#define IVSHMEM_QEMU_CMD_FD_FMT ":%s:0x%" PRIx64 ":0x%" PRIx64
#define IVSHMEM_QEMU_CMDLINE_BUFSIZE 1024
#define IVSHMEM_MAX_PAGES (1 << 12)
#define adjacent(x,y) (((x).phys_addr+(x).len)==(y).phys_addr)
#define METADATA_SIZE_ALIGNED \
	(RTE_ALIGN_CEIL(sizeof(struct rte_ivshmem_metadata),pagesz))

#define GET_PAGEMAP_ADDR(in,addr,dlm,err)    \
{                                      \
	char *end;                         \
	errno = 0;                         \
	addr = strtoull((in), &end, 16);   \
	if (errno != 0 || *end != (dlm)) { \
		RTE_LOG(ERR, EAL, err);        \
		goto error;                    \
	}                                  \
	(in) = end + 1;                    \
}
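
/*
 * Illustrative note (not from the original source): GET_PAGEMAP_ADDR is used
 * below to walk /proc/self/maps lines, which look roughly like
 *
 *     7f1c40000000-7f1c80000000 rw-s 00000000 00:2d 12345  /mnt/huge/rtemap_0
 *
 * The first invocation parses the hex start address up to the '-' delimiter,
 * the second parses the end address up to the space, and 'in' is advanced
 * past the delimiter each time. The hugepage path shown is only a typical
 * example.
 */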

static int pagesz;

struct memseg_cache_entry {
	char filepath[PATH_MAX];
	uint64_t offset;
	uint64_t len;
};

struct ivshmem_config {
	struct rte_ivshmem_metadata * metadata;
	struct memseg_cache_entry memseg_cache[IVSHMEM_MAX_PAGES];
		/**< account for multiple files per segment case */
	struct flock lock;
	rte_spinlock_t sl;
};

static struct ivshmem_config
ivshmem_global_config[RTE_LIBRTE_IVSHMEM_MAX_METADATA_FILES];

static rte_spinlock_t global_cfg_sl;

static struct ivshmem_config *
get_config_by_name(const char * name)
{
	struct rte_ivshmem_metadata * config;
	unsigned i;

	for (i = 0; i < RTE_DIM(ivshmem_global_config); i++) {
		config = ivshmem_global_config[i].metadata;
		if (config == NULL)
			return NULL;
		if (strncmp(name, config->name, IVSHMEM_NAME_LEN) == 0)
			return &ivshmem_global_config[i];
	}

	return NULL;
}

static int
overlap(const struct rte_memzone * s1, const struct rte_memzone * s2)
{
	uint64_t start1, end1, start2, end2;

	start1 = s1->addr_64;
	end1 = s1->addr_64 + s1->len;
	start2 = s2->addr_64;
	end2 = s2->addr_64 + s2->len;

	if (start1 >= start2 && start1 < end2)
		return 1;
	if (start2 >= start1 && start2 < end1)
		return 1;

	return 0;
}

static struct rte_memzone *
get_memzone_by_addr(const void * addr)
{
	struct rte_memzone * tmp, * mz;
	struct rte_mem_config * mcfg;
	int i;

	mcfg = rte_eal_get_configuration()->mem_config;
	mz = NULL;

	/* find memzone for the ring */
	for (i = 0; i < RTE_MAX_MEMZONE; i++) {
		tmp = &mcfg->memzone[i];

		if (tmp->addr_64 == (uint64_t) addr) {
			mz = tmp;
			break;
		}
	}

	return mz;
}

static int
entry_compare(const void * a, const void * b)
{
	const struct rte_ivshmem_metadata_entry * e1 =
			(const struct rte_ivshmem_metadata_entry*) a;
	const struct rte_ivshmem_metadata_entry * e2 =
			(const struct rte_ivshmem_metadata_entry*) b;

	/* move unallocated zones to the end */
	if (e1->mz.addr == NULL && e2->mz.addr == NULL)
		return 0;
	if (e1->mz.addr == NULL)
		return 1;
	if (e2->mz.addr == NULL)
		return -1;

	/* sort by physical address; return -1/0/1 so qsort sees a
	 * consistent ordering */
	if (e1->mz.phys_addr > e2->mz.phys_addr)
		return 1;
	if (e1->mz.phys_addr < e2->mz.phys_addr)
		return -1;
	return 0;
}

/* fills hugepage cache entry for a given start virt_addr */
static int
get_hugefile_by_virt_addr(uint64_t virt_addr, struct memseg_cache_entry * e)
{
	uint64_t start_addr, end_addr;
	char *start, *path_end;
	char buf[PATH_MAX*2];
	FILE *f;

	start = NULL;
	path_end = NULL;
	start_addr = 0;

	memset(e->filepath, 0, sizeof(e->filepath));

	/* open /proc/self/maps */
	f = fopen("/proc/self/maps", "r");
	if (f == NULL) {
		RTE_LOG(ERR, EAL, "cannot open /proc/self/maps!\n");
		return -1;
	}

	/* parse maps */
	while (fgets(buf, sizeof(buf), f) != NULL) {

		/* get endptr to end of start addr */
		start = buf;

		GET_PAGEMAP_ADDR(start,start_addr,'-',
				"Cannot find start address in maps!\n");

		/* if start address is bigger than our address, skip */
		if (start_addr > virt_addr)
			continue;

		GET_PAGEMAP_ADDR(start,end_addr,' ',
				"Cannot find end address in maps!\n");

		/* if end address is less than our address, skip */
		if (end_addr <= virt_addr)
			continue;

		/* find where the path starts */
		start = strstr(start, "/");

		if (start == NULL)
			continue;

		/* at this point, we know that this is our map.
		 * now let's find the file */
		path_end = strstr(start, "\n");
		break;
	}

	if (path_end == NULL) {
		RTE_LOG(ERR, EAL, "Hugefile path not found!\n");
		goto error;
	}

	/* calculate offset and copy the file path */
	rte_snprintf(e->filepath, RTE_PTR_DIFF(path_end, start) + 1, "%s", start);

	e->offset = virt_addr - start_addr;

	fclose(f);

	return 0;
error:
	fclose(f);
	return -1;
}

/*
 * This is a complex function. What it does is the following:
 *  1. Goes through metadata and gets list of hugepages involved
 *  2. Sorts the hugepages by size (1G first)
 *  3. Goes through metadata again and writes correct offsets
 *  4. Goes through pages and finds out their filenames, offsets etc.
 */
static int
build_config(struct rte_ivshmem_metadata * metadata)
{
	struct rte_ivshmem_metadata_entry * e_local;
	struct memseg_cache_entry * ms_local;
	struct rte_memseg pages[IVSHMEM_MAX_PAGES];
	struct rte_ivshmem_metadata_entry *entry;
	struct memseg_cache_entry * c_entry, * prev_entry;
	struct ivshmem_config * config;
	unsigned i, j, mz_iter, ms_iter;
	uint64_t biggest_len;
	int biggest_idx;

	/* return error if we try to use an unknown config file */
	config = get_config_by_name(metadata->name);
	if (config == NULL) {
		RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", metadata->name);
		goto fail_e;
	}

	memset(pages, 0, sizeof(pages));

	e_local = malloc(sizeof(config->metadata->entry));
	if (e_local == NULL)
		goto fail_e;
	ms_local = malloc(sizeof(config->memseg_cache));
	if (ms_local == NULL)
		goto fail_ms;

	/* make local copies before doing anything */
	memcpy(e_local, config->metadata->entry, sizeof(config->metadata->entry));
	memcpy(ms_local, config->memseg_cache, sizeof(config->memseg_cache));

	qsort(e_local, RTE_DIM(config->metadata->entry), sizeof(struct rte_ivshmem_metadata_entry),
			entry_compare);

	/* first pass - collect all huge pages */
	for (mz_iter = 0; mz_iter < RTE_DIM(config->metadata->entry); mz_iter++) {

		entry = &e_local[mz_iter];

		uint64_t start_addr = RTE_ALIGN_FLOOR(entry->mz.addr_64,
				entry->mz.hugepage_sz);
		uint64_t offset = entry->mz.addr_64 - start_addr;
		uint64_t len = RTE_ALIGN_CEIL(entry->mz.len + offset,
				entry->mz.hugepage_sz);

		if (entry->mz.addr_64 == 0 || start_addr == 0 || len == 0)
			continue;

		int start_page;

		/* find first unused page - mz are phys_addr sorted so we don't have to
		 * look out for holes */
		for (i = 0; i < RTE_DIM(pages); i++) {

			/* skip if we already have this page */
			if (pages[i].addr_64 == start_addr) {
				start_addr += entry->mz.hugepage_sz;
				len -= entry->mz.hugepage_sz;
				continue;
			}
			/* we found a new page */
			else if (pages[i].addr_64 == 0) {
				start_page = i;
				break;
			}
		}
		if (i == RTE_DIM(pages)) {
			RTE_LOG(ERR, EAL, "Cannot find unused page!\n");
			goto fail;
		}

		/* populate however many pages the memzone has */
		for (i = start_page; i < RTE_DIM(pages) && len != 0; i++) {

			pages[i].addr_64 = start_addr;
			pages[i].len = entry->mz.hugepage_sz;
			start_addr += entry->mz.hugepage_sz;
			len -= entry->mz.hugepage_sz;
		}
		/* if there's still length left */
		if (len != 0) {
			RTE_LOG(ERR, EAL, "Not enough space for pages!\n");
			goto fail;
		}
	}

	/* second pass - sort pages by size */
	for (i = 0; i < RTE_DIM(pages); i++) {

		if (pages[i].addr == NULL)
			break;

		biggest_len = 0;
		biggest_idx = -1;

		/*
		 * browse all pages starting at 'i', and find the
		 * page with the largest length
		 */
		for (j = i; j < RTE_DIM(pages); j++) {
			if (pages[j].addr == NULL)
				break;
			if (biggest_len == 0 ||
				pages[j].len > biggest_len) {
				biggest_len = pages[j].len;
				biggest_idx = j;
			}
		}

		/* should not happen */
		if (biggest_idx == -1) {
			RTE_LOG(ERR, EAL, "Error sorting by size!\n");
			goto fail;
		}
		if (i != (unsigned) biggest_idx) {
			struct rte_memseg tmp;

			memcpy(&tmp, &pages[biggest_idx], sizeof(struct rte_memseg));

			/* we don't want to break contiguousness, so instead of just
			 * swapping segments, we move all the preceding segments to the
			 * right and then put the old segment @ biggest_idx in place of
			 * segment @ i */
			/* iterate top-down and stop at 'i' so the unsigned index
			 * cannot wrap around when i == 0 */
			for (j = biggest_idx; j > i; j--) {
				memcpy(&pages[j], &pages[j-1], sizeof(struct rte_memseg));
				memset(&pages[j-1], 0, sizeof(struct rte_memseg));
			}

			/* put old biggest segment to its new place */
			memcpy(&pages[i], &tmp, sizeof(struct rte_memseg));
		}
	}

	/* third pass - write correct offsets */
	for (mz_iter = 0; mz_iter < RTE_DIM(config->metadata->entry); mz_iter++) {

		uint64_t offset = 0;

		entry = &e_local[mz_iter];

		if (entry->mz.addr_64 == 0)
			break;

		/* find page for current memzone */
		for (i = 0; i < RTE_DIM(pages); i++) {
			/* we found our page */
			if (entry->mz.addr_64 >= pages[i].addr_64 &&
					entry->mz.addr_64 < pages[i].addr_64 + pages[i].len) {
				entry->offset = (entry->mz.addr_64 - pages[i].addr_64) +
						offset;
				break;
			}
			offset += pages[i].len;
		}
		if (i == RTE_DIM(pages)) {
			RTE_LOG(ERR, EAL, "Page not found!\n");
			goto fail;
		}
	}

	ms_iter = 0;
	prev_entry = NULL;

	/* fourth pass - create proper memseg cache */
	for (i = 0; i < RTE_DIM(pages) &&
			ms_iter <= RTE_DIM(config->memseg_cache); i++) {
		if (pages[i].addr_64 == 0)
			break;

		if (ms_iter == RTE_DIM(pages)) {
			RTE_LOG(ERR, EAL, "The universe has collapsed!\n");
			goto fail;
		}

		c_entry = &ms_local[ms_iter];
		c_entry->len = pages[i].len;

		if (get_hugefile_by_virt_addr(pages[i].addr_64, c_entry) < 0)
			goto fail;

		/* if previous entry has the same filename and is contiguous,
		 * clear current entry and increase previous entry's length
		 */
		if (prev_entry != NULL &&
				strncmp(c_entry->filepath, prev_entry->filepath,
				sizeof(c_entry->filepath)) == 0 &&
				prev_entry->offset + prev_entry->len == c_entry->offset) {
			prev_entry->len += pages[i].len;
			memset(c_entry, 0, sizeof(struct memseg_cache_entry));
		}
		else {
			prev_entry = c_entry;
			ms_iter++;
		}
	}

	/* update current configuration with new valid data */
	memcpy(config->metadata->entry, e_local, sizeof(config->metadata->entry));
	memcpy(config->memseg_cache, ms_local, sizeof(config->memseg_cache));

	free(ms_local);
	free(e_local);

	return 0;
fail:
	free(ms_local);
fail_ms:
	free(e_local);
fail_e:
	return -1;
}

static int
add_memzone_to_metadata(const struct rte_memzone * mz,
		struct ivshmem_config * config)
{
	struct rte_ivshmem_metadata_entry * entry;
	unsigned i;

	rte_spinlock_lock(&config->sl);

	/* find free slot in this config */
	for (i = 0; i < RTE_DIM(config->metadata->entry); i++) {
		entry = &config->metadata->entry[i];

		if (entry->mz.addr_64 != 0 && overlap(mz, &entry->mz)) {
			RTE_LOG(ERR, EAL, "Overlapping memzones!\n");
			goto fail;
		}

		/* if addr is zero, the memzone is probably free */
		if (entry->mz.addr_64 == 0) {
			RTE_LOG(DEBUG, EAL, "Adding memzone '%s' at %p to metadata %s\n",
					mz->name, mz->addr, config->metadata->name);
			memcpy(&entry->mz, mz, sizeof(struct rte_memzone));

			/* run config file parser */
			if (build_config(config->metadata) < 0)
				goto fail;

			break;
		}
	}

	/* if we reached the maximum, that means we have no place in config */
	if (i == RTE_DIM(config->metadata->entry)) {
		RTE_LOG(ERR, EAL, "No space left in IVSHMEM metadata %s!\n",
				config->metadata->name);
		goto fail;
	}

	rte_spinlock_unlock(&config->sl);
	return 0;
fail:
	rte_spinlock_unlock(&config->sl);
	return -1;
}

static int
add_ring_to_metadata(const struct rte_ring * r,
		struct ivshmem_config * config)
{
	struct rte_memzone * mz;

	mz = get_memzone_by_addr(r);

	if (!mz) {
		RTE_LOG(ERR, EAL, "Cannot find memzone for ring!\n");
		return -1;
	}

	return add_memzone_to_metadata(mz, config);
}

static int
add_mempool_to_metadata(const struct rte_mempool * mp,
		struct ivshmem_config * config)
{
	struct rte_memzone * mz;
	int ret;

	mz = get_memzone_by_addr(mp);
	ret = 0;

	if (!mz) {
		RTE_LOG(ERR, EAL, "Cannot find memzone for mempool!\n");
		return -1;
	}

	/* mempool consists of memzone and ring */
	ret = add_memzone_to_metadata(mz, config);
	if (ret < 0)
		return -1;

	return add_ring_to_metadata(mp->ring, config);
}

int
rte_ivshmem_metadata_add_ring(const struct rte_ring * r, const char * name)
{
	struct ivshmem_config * config;

	if (name == NULL || r == NULL)
		return -1;

	config = get_config_by_name(name);

	if (config == NULL) {
		RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", name);
		return -1;
	}

	return add_ring_to_metadata(r, config);
}

int
rte_ivshmem_metadata_add_memzone(const struct rte_memzone * mz, const char * name)
{
	struct ivshmem_config * config;

	if (name == NULL || mz == NULL)
		return -1;

	config = get_config_by_name(name);

	if (config == NULL) {
		RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", name);
		return -1;
	}

	return add_memzone_to_metadata(mz, config);
}

int
rte_ivshmem_metadata_add_mempool(const struct rte_mempool * mp, const char * name)
{
	struct ivshmem_config * config;

	if (name == NULL || mp == NULL)
		return -1;

	config = get_config_by_name(name);

	if (config == NULL) {
		RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", name);
		return -1;
	}

	return add_mempool_to_metadata(mp, config);
}

static inline void
ivshmem_config_path(char *buffer, size_t bufflen, const char *name)
{
	rte_snprintf(buffer, bufflen, IVSHMEM_CONFIG_FILE_FMT, name);
}

static inline void *
ivshmem_metadata_create(const char *name, size_t size,
		struct flock *lock)
{
	int retval, fd;
	void *metadata_addr;
	char pathname[PATH_MAX];

	ivshmem_config_path(pathname, sizeof(pathname), name);

	fd = open(pathname, O_RDWR | O_CREAT, 0660);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open '%s'\n", pathname);
		return NULL;
	}

	size = METADATA_SIZE_ALIGNED;

	retval = fcntl(fd, F_SETLK, lock);
	if (retval < 0) {
		close(fd);
		RTE_LOG(ERR, EAL, "Cannot create lock on '%s'. Is another "
				"process using it?\n", pathname);
		return NULL;
	}

	retval = ftruncate(fd, size);
	if (retval < 0) {
		close(fd);
		RTE_LOG(ERR, EAL, "Cannot resize '%s'\n", pathname);
		return NULL;
	}

	metadata_addr = mmap(NULL, size,
				PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (metadata_addr == MAP_FAILED) {
		RTE_LOG(ERR, EAL, "Cannot mmap memory for '%s'\n", pathname);

		/* we don't care if we can't unlock */
		lock->l_type = F_UNLCK;
		fcntl(fd, F_SETLK, lock);
		close(fd);

		return NULL;
	}

	return metadata_addr;
}

int rte_ivshmem_metadata_create(const char *name)
{
	struct ivshmem_config * ivshmem_config;
	unsigned index;

	if (pagesz == 0)
		pagesz = getpagesize();

	if (name == NULL)
		return -1;

	rte_spinlock_lock(&global_cfg_sl);

	for (index = 0; index < RTE_DIM(ivshmem_global_config); index++) {
		if (ivshmem_global_config[index].metadata == NULL) {
			ivshmem_config = &ivshmem_global_config[index];
			break;
		}
	}

	if (index == RTE_DIM(ivshmem_global_config)) {
		RTE_LOG(ERR, EAL, "Cannot create more ivshmem config files. "
				"Maximum has been reached\n");
		rte_spinlock_unlock(&global_cfg_sl);
		return -1;
	}

	ivshmem_config->lock.l_type = F_WRLCK;
	ivshmem_config->lock.l_whence = SEEK_SET;

	ivshmem_config->lock.l_start = 0;
	ivshmem_config->lock.l_len = METADATA_SIZE_ALIGNED;

	ivshmem_global_config[index].metadata = ((struct rte_ivshmem_metadata *)
			ivshmem_metadata_create(
					name,
					sizeof(struct rte_ivshmem_metadata),
					&ivshmem_config->lock));

	if (ivshmem_global_config[index].metadata == NULL) {
		rte_spinlock_unlock(&global_cfg_sl);
		return -1;
	}

	/* Metadata setup */
	memset(ivshmem_config->metadata, 0, sizeof(struct rte_ivshmem_metadata));
	ivshmem_config->metadata->magic_number = IVSHMEM_MAGIC;
	rte_snprintf(ivshmem_config->metadata->name,
			sizeof(ivshmem_config->metadata->name), "%s", name);

	rte_spinlock_unlock(&global_cfg_sl);

	return 0;
}

int
rte_ivshmem_metadata_cmdline_generate(char *buffer, unsigned size, const char *name)
{
	const struct memseg_cache_entry * ms_cache, *entry;
	struct ivshmem_config * config;
	char cmdline[IVSHMEM_QEMU_CMDLINE_BUFSIZE], *cmdline_ptr;
	char cfg_file_path[PATH_MAX];
	unsigned remaining_len, tmplen, iter;
	uint64_t shared_mem_size, zero_size, total_size;

	if (buffer == NULL || name == NULL)
		return -1;

	config = get_config_by_name(name);

	if (config == NULL) {
		RTE_LOG(ERR, EAL, "Config %s not found!\n", name);
		return -1;
	}

	rte_spinlock_lock(&config->sl);

	/* prepare metadata file path */
	rte_snprintf(cfg_file_path, sizeof(cfg_file_path), IVSHMEM_CONFIG_FILE_FMT,
			config->metadata->name);

	ms_cache = config->memseg_cache;

	cmdline_ptr = cmdline;
	remaining_len = sizeof(cmdline);

	shared_mem_size = 0;
	iter = 0;

	while (iter < RTE_DIM(config->metadata->entry) && ms_cache[iter].len != 0) {

		entry = &ms_cache[iter];

		/* Offset and sizes within the current pathname */
		tmplen = rte_snprintf(cmdline_ptr, remaining_len, IVSHMEM_QEMU_CMD_FD_FMT,
				entry->filepath, entry->offset, entry->len);

		shared_mem_size += entry->len;

		cmdline_ptr = RTE_PTR_ADD(cmdline_ptr, tmplen);
		remaining_len -= tmplen;

		if (remaining_len == 0) {
			RTE_LOG(ERR, EAL, "Command line too long!\n");
			rte_spinlock_unlock(&config->sl);
			return -1;
		}

		iter++;
	}

	total_size = rte_align64pow2(shared_mem_size + METADATA_SIZE_ALIGNED);
	zero_size = total_size - shared_mem_size - METADATA_SIZE_ALIGNED;

	/* add /dev/zero to command-line to fill the space */
	tmplen = rte_snprintf(cmdline_ptr, remaining_len, IVSHMEM_QEMU_CMD_FD_FMT,
			"/dev/zero",
			0x0,
			zero_size);

	cmdline_ptr = RTE_PTR_ADD(cmdline_ptr, tmplen);
	remaining_len -= tmplen;

	if (remaining_len == 0) {
		RTE_LOG(ERR, EAL, "Command line too long!\n");
		rte_spinlock_unlock(&config->sl);
		return -1;
	}

	/* add metadata file to the end of command-line */
	tmplen = rte_snprintf(cmdline_ptr, remaining_len, IVSHMEM_QEMU_CMD_FD_FMT,
			cfg_file_path,
			0x0,
			METADATA_SIZE_ALIGNED);

	cmdline_ptr = RTE_PTR_ADD(cmdline_ptr, tmplen);
	remaining_len -= tmplen;

	if (remaining_len == 0) {
		RTE_LOG(ERR, EAL, "Command line too long!\n");
		rte_spinlock_unlock(&config->sl);
		return -1;
	}

	/* if current length of the command line is bigger than the buffer supplied
	 * by the user, or if command-line is bigger than what IVSHMEM accepts */
	if ((sizeof(cmdline) - remaining_len) > size) {
		RTE_LOG(ERR, EAL, "Buffer is too short!\n");
		rte_spinlock_unlock(&config->sl);
		return -1;
	}
	/* complete the command-line */
	rte_snprintf(buffer, size,
			IVSHMEM_QEMU_CMD_LINE_HEADER_FMT,
			total_size >> 20,
			cmdline);

	rte_spinlock_unlock(&config->sl);

	return 0;
}
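
/*
 * Illustrative note (not from the original source): given the format macros
 * above, the string written into 'buffer' has roughly the following shape,
 * where the hugepage path and the sizes are only example values:
 *
 *     -device ivshmem,size=<total_MB>M,shm=fd:/mnt/huge/rtemap_0:0x0:0x40000000
 *         :/dev/zero:0x0:0x<padding>
 *         :/var/run/.dpdk_ivshmem_metadata_<name>:0x0:0x<metadata_len>
 *
 * i.e. the hugepage file segments come first, then a /dev/zero filler that
 * rounds the total up to a power of two, then the metadata file itself.
 */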

void
rte_ivshmem_metadata_dump(FILE *f, const char *name)
{
	unsigned i = 0;
	struct ivshmem_config * config;
	struct rte_ivshmem_metadata_entry *entry;
#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
	uint64_t addr;
	uint64_t end, hugepage_sz;
	struct memseg_cache_entry e;
#endif

	if (name == NULL)
		return;

	/* return error if we try to use an unknown config file */
	config = get_config_by_name(name);
	if (config == NULL) {
		RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", name);
		return;
	}

	rte_spinlock_lock(&config->sl);

	entry = &config->metadata->entry[0];

	while (i < RTE_DIM(config->metadata->entry) && entry->mz.addr != NULL) {

		fprintf(f, "Entry %u: name:<%-20s>, phys:0x%-15lx, len:0x%-15lx, "
			"virt:%-15p, off:0x%-15lx\n",
			i,
			entry->mz.name,
			entry->mz.phys_addr,
			entry->mz.len,
			entry->mz.addr,
			entry->offset);
		i++;

#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
		fprintf(f, "\tHugepage files:\n");

		hugepage_sz = entry->mz.hugepage_sz;
		addr = RTE_ALIGN_FLOOR(entry->mz.addr_64, hugepage_sz);
		end = addr + RTE_ALIGN_CEIL(entry->mz.len + (entry->mz.addr_64 - addr),
				hugepage_sz);

		for (; addr < end; addr += hugepage_sz) {
			memset(&e, 0, sizeof(e));

			get_hugefile_by_virt_addr(addr, &e);

			fprintf(f, "\t0x%"PRIx64 "-0x%" PRIx64 " offset: 0x%" PRIx64 " %s\n",
					addr, addr + hugepage_sz, e.offset, e.filepath);
		}
#endif
		entry++;
	}

	rte_spinlock_unlock(&config->sl);
}
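
/*
 * Illustrative usage sketch (not part of the original file): a host-side
 * application would typically call this API in the following order; the
 * metadata name "ivshmem_test" and the 'r'/'mp' objects are hypothetical.
 *
 *     char cmdline[IVSHMEM_QEMU_CMDLINE_BUFSIZE];
 *
 *     rte_ivshmem_metadata_create("ivshmem_test");
 *     rte_ivshmem_metadata_add_ring(r, "ivshmem_test");
 *     rte_ivshmem_metadata_add_mempool(mp, "ivshmem_test");
 *     rte_ivshmem_metadata_cmdline_generate(cmdline, sizeof(cmdline),
 *                     "ivshmem_test");
 *
 * The resulting 'cmdline' string is then appended to the QEMU command line
 * of the guest that should see the shared memory.
 */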