add FILE argument to debug functions
lib/librte_ivshmem/rte_ivshmem.c
1 /*-
2  *   BSD LICENSE
3  * 
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  * 
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  * 
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  * 
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 #include <fcntl.h>
34 #include <limits.h>
35 #include <unistd.h>
36 #include <sys/mman.h>
37 #include <string.h>
38
39 #include <rte_eal_memconfig.h>
40 #include <rte_memory.h>
41 #include <rte_ivshmem.h>
42 #include <rte_string_fns.h>
43 #include <rte_common.h>
44 #include <rte_log.h>
45 #include <rte_debug.h>
46 #include <rte_spinlock.h>
48 #include <rte_malloc.h>
49
50 #include "rte_ivshmem.h"
51
52 #define IVSHMEM_CONFIG_FILE_FMT "/var/run/.dpdk_ivshmem_metadata_%s"
53 #define IVSHMEM_QEMU_CMD_LINE_HEADER_FMT "-device ivshmem,size=%" PRIu64 "M,shm=fd%s"
54 #define IVSHMEM_QEMU_CMD_FD_FMT ":%s:0x%" PRIx64 ":0x%" PRIx64
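/*
 * The two formats above combine into a single QEMU option, for example
 * (illustrative paths and sizes only):
 *
 *   -device ivshmem,size=32M,shm=fd:/mnt/huge/rtemap_0:0x0:0x200000:...
 *
 * i.e. the header followed by one <file>:<offset>:<length> triplet per
 * shared memory region.
 */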
55 #define IVSHMEM_QEMU_CMDLINE_BUFSIZE 1024
56 #define IVSHMEM_MAX_PAGES (1 << 12)
57 #define adjacent(x,y) (((x).phys_addr+(x).len)==(y).phys_addr)
58 #define METADATA_SIZE_ALIGNED \
59         (RTE_ALIGN_CEIL(sizeof(struct rte_ivshmem_metadata),pagesz))
60
61 #define GET_PAGEMAP_ADDR(in,addr,dlm,err)    \
62 {                                      \
63         char *end;                         \
64         errno = 0;                         \
65         addr = strtoull((in), &end, 16);   \
66         if (errno != 0 || *end != (dlm)) { \
67                 RTE_LOG(ERR, EAL, err);        \
68                 goto error;                    \
69         }                                  \
70         (in) = end + 1;                    \
71 }
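/*
 * GET_PAGEMAP_ADDR consumes one hexadecimal address from a /proc/self/maps
 * line and leaves 'in' pointing just past the delimiter. A hugepage mapping
 * typically looks like (illustrative values):
 *
 *   7f2c40000000-7f2c80000000 rw-s 00000000 00:1f 12345  /mnt/huge/rtemap_0
 *
 * so two consecutive invocations with '-' and ' ' yield the start and end
 * virtual addresses of the mapping.
 */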
72
73 static int pagesz;
74
75 struct memseg_cache_entry {
76         char filepath[PATH_MAX];
77         uint64_t offset;
78         uint64_t len;
79 };
80
81 struct ivshmem_config {
82         struct rte_ivshmem_metadata * metadata;
83         struct memseg_cache_entry memseg_cache[IVSHMEM_MAX_PAGES];
84                 /**< account for multiple files per segment case */
85         struct flock lock;
86         rte_spinlock_t sl;
87 };
88
89 static struct ivshmem_config
90 ivshmem_global_config[RTE_LIBRTE_IVSHMEM_MAX_METADATA_FILES];
91
92 static rte_spinlock_t global_cfg_sl;
93
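/*
 * Configs are handed out from ivshmem_global_config in order, so the first
 * slot with a NULL metadata pointer marks the end of the used entries.
 */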
94 static struct ivshmem_config *
95 get_config_by_name(const char * name)
96 {
97         struct rte_ivshmem_metadata * config;
98         unsigned i;
99
100         for (i = 0; i < RTE_DIM(ivshmem_global_config); i++) {
101                 config = ivshmem_global_config[i].metadata;
102                 if (config == NULL)
103                         return NULL;
104                 if (strncmp(name, config->name, IVSHMEM_NAME_LEN) == 0)
105                         return &ivshmem_global_config[i];
106         }
107
108         return NULL;
109 }
110
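/* report whether the address ranges of two memzones intersect (half-open intervals) */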
111 static int
112 overlap(const struct rte_memzone * s1, const struct rte_memzone * s2)
113 {
114         uint64_t start1, end1, start2, end2;
115
116         start1 = s1->addr_64;
117         end1 = s1->addr_64 + s1->len;
118         start2 = s2->addr_64;
119         end2 = s2->addr_64 + s2->len;
120
121         if (start1 >= start2 && start1 < end2)
122                 return 1;
123         if (start2 >= start1 && start2 < end1)
124                 return 1;
125
126         return 0;
127 }
128
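/* find the memzone whose start virtual address equals 'addr' */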
129 static struct rte_memzone *
130 get_memzone_by_addr(const void * addr)
131 {
132         struct rte_memzone * tmp, * mz;
133         struct rte_mem_config * mcfg;
134         int i;
135
136         mcfg = rte_eal_get_configuration()->mem_config;
137         mz = NULL;
138
        /* find the memzone that starts at this address */
140         for (i = 0; i < RTE_MAX_MEMZONE; i++) {
141                 tmp = &mcfg->memzone[i];
142
143                 if (tmp->addr_64 == (uint64_t) addr) {
144                         mz = tmp;
145                         break;
146                 }
147         }
148
149         return mz;
150 }
151
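/* qsort comparator: order metadata entries by physical address, unallocated entries last */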
152 static int
153 entry_compare(const void * a, const void * b)
154 {
155         const struct rte_ivshmem_metadata_entry * e1 =
156                         (const struct rte_ivshmem_metadata_entry*) a;
157         const struct rte_ivshmem_metadata_entry * e2 =
158                         (const struct rte_ivshmem_metadata_entry*) b;
159
160         /* move unallocated zones to the end */
161         if (e1->mz.addr == NULL && e2->mz.addr == NULL)
162                 return 0;
163         if (e1->mz.addr == 0)
164                 return 1;
165         if (e2->mz.addr == 0)
166                 return -1;
167
        /* qsort requires a full three-way result to order correctly */
        if (e1->mz.phys_addr < e2->mz.phys_addr)
                return -1;
        return e1->mz.phys_addr > e2->mz.phys_addr;
169 }
170
171 /* fills hugepage cache entry for a given start virt_addr */
172 static int
173 get_hugefile_by_virt_addr(uint64_t virt_addr, struct memseg_cache_entry * e)
174 {
175         uint64_t start_addr, end_addr;
176         char *start,*path_end;
177         char buf[PATH_MAX*2];
178         FILE *f;
179
180         start = NULL;
181         path_end = NULL;
182         start_addr = 0;
183
184         memset(e->filepath, 0, sizeof(e->filepath));
185
186         /* open /proc/self/maps */
187         f = fopen("/proc/self/maps", "r");
188         if (f == NULL) {
189                 RTE_LOG(ERR, EAL, "cannot open /proc/self/maps!\n");
190                 return -1;
191         }
192
193         /* parse maps */
194         while (fgets(buf, sizeof(buf), f) != NULL) {
195
196                 /* get endptr to end of start addr */
197                 start = buf;
198
199                 GET_PAGEMAP_ADDR(start,start_addr,'-',
200                                 "Cannot find start address in maps!\n");
201
202                 /* if start address is bigger than our address, skip */
203                 if (start_addr > virt_addr)
204                         continue;
205
206                 GET_PAGEMAP_ADDR(start,end_addr,' ',
207                                 "Cannot find end address in maps!\n");
208
209                 /* if end address is less than our address, skip */
210                 if (end_addr <= virt_addr)
211                         continue;
212
213                 /* find where the path starts */
214                 start = strstr(start, "/");
215
216                 if (start == NULL)
217                         continue;
218
219                 /* at this point, we know that this is our map.
220                  * now let's find the file */
221                 path_end = strstr(start, "\n");
222                 break;
223         }
224
225         if (path_end == NULL) {
226                 RTE_LOG(ERR, EAL, "Hugefile path not found!\n");
227                 goto error;
228         }
229
230         /* calculate offset and copy the file path */
231         rte_snprintf(e->filepath, RTE_PTR_DIFF(path_end, start) + 1, "%s", start);
232
233         e->offset = virt_addr - start_addr;
234
235         fclose(f);
236
237         return 0;
238 error:
239         fclose(f);
240         return -1;
241 }
242
243 /*
244  * This is a complex function. What it does is the following:
245  *  1. Goes through metadata and gets list of hugepages involved
246  *  2. Sorts the hugepages by size (1G first)
247  *  3. Goes through metadata again and writes correct offsets
248  *  4. Goes through pages and finds out their filenames, offsets etc.
249  */
250 static int
251 build_config(struct rte_ivshmem_metadata * metadata)
252 {
253         struct rte_ivshmem_metadata_entry * e_local;
254         struct memseg_cache_entry * ms_local;
255         struct rte_memseg pages[IVSHMEM_MAX_PAGES];
256         struct rte_ivshmem_metadata_entry *entry;
257         struct memseg_cache_entry * c_entry, * prev_entry;
258         struct ivshmem_config * config;
259         unsigned i, j, mz_iter, ms_iter;
260         uint64_t biggest_len;
261         int biggest_idx;
262
263         /* return error if we try to use an unknown config file */
264         config = get_config_by_name(metadata->name);
265         if (config == NULL) {
266                 RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", metadata->name);
267                 goto fail_e;
268         }
269
270         memset(pages, 0, sizeof(pages));
271
272         e_local = malloc(sizeof(config->metadata->entry));
273         if (e_local == NULL)
274                 goto fail_e;
275         ms_local = malloc(sizeof(config->memseg_cache));
276         if (ms_local == NULL)
277                 goto fail_ms;
278
279
280         /* make local copies before doing anything */
281         memcpy(e_local, config->metadata->entry, sizeof(config->metadata->entry));
282         memcpy(ms_local, config->memseg_cache, sizeof(config->memseg_cache));
283
284         qsort(e_local, RTE_DIM(config->metadata->entry), sizeof(struct rte_ivshmem_metadata_entry),
285                         entry_compare);
286
287         /* first pass - collect all huge pages */
288         for (mz_iter = 0; mz_iter < RTE_DIM(config->metadata->entry); mz_iter++) {
289
290                 entry = &e_local[mz_iter];
291
292                 uint64_t start_addr = RTE_ALIGN_FLOOR(entry->mz.addr_64,
293                                 entry->mz.hugepage_sz);
294                 uint64_t offset = entry->mz.addr_64 - start_addr;
295                 uint64_t len = RTE_ALIGN_CEIL(entry->mz.len + offset,
296                                 entry->mz.hugepage_sz);
297
298                 if (entry->mz.addr_64 == 0 || start_addr == 0 || len == 0)
299                         continue;
300
301                 int start_page;
302
303                 /* find first unused page - mz are phys_addr sorted so we don't have to
304                  * look out for holes */
305                 for (i = 0; i < RTE_DIM(pages); i++) {
306
307                         /* skip if we already have this page */
308                         if (pages[i].addr_64 == start_addr) {
309                                 start_addr += entry->mz.hugepage_sz;
310                                 len -= entry->mz.hugepage_sz;
311                                 continue;
312                         }
313                         /* we found a new page */
314                         else if (pages[i].addr_64 == 0) {
315                                 start_page = i;
316                                 break;
317                         }
318                 }
319                 if (i == RTE_DIM(pages)) {
320                         RTE_LOG(ERR, EAL, "Cannot find unused page!\n");
321                         goto fail;
322                 }
323
324                 /* populate however many pages the memzone has */
325                 for (i = start_page; i < RTE_DIM(pages) && len != 0; i++) {
326
327                         pages[i].addr_64 = start_addr;
328                         pages[i].len = entry->mz.hugepage_sz;
329                         start_addr += entry->mz.hugepage_sz;
330                         len -= entry->mz.hugepage_sz;
331                 }
332                 /* if there's still length left */
333                 if (len != 0) {
334                         RTE_LOG(ERR, EAL, "Not enough space for pages!\n");
335                         goto fail;
336                 }
337         }
338
339         /* second pass - sort pages by size */
340         for (i = 0; i < RTE_DIM(pages); i++) {
341
342                 if (pages[i].addr == NULL)
343                         break;
344
345                 biggest_len = 0;
346                 biggest_idx = -1;
347
                /*
                 * browse all entries starting at 'i' and find the
                 * entry with the largest length
                 */
352                 for (j=i; j< RTE_DIM(pages); j++) {
353                         if (pages[j].addr == NULL)
354                                         break;
355                         if (biggest_len == 0 ||
356                                 pages[j].len > biggest_len) {
357                                 biggest_len = pages[j].len;
358                                 biggest_idx = j;
359                         }
360                 }
361
362                 /* should not happen */
363                 if (biggest_idx == -1) {
364                         RTE_LOG(ERR, EAL, "Error sorting by size!\n");
365                         goto fail;
366                 }
367                 if (i != (unsigned) biggest_idx) {
368                         struct rte_memseg tmp;
369
370                         memcpy(&tmp, &pages[biggest_idx], sizeof(struct rte_memseg));
371
372                         /* we don't want to break contiguousness, so instead of just
373                          * swapping segments, we move all the preceding segments to the
374                          * right and then put the old segment @ biggest_idx in place of
375                          * segment @ i */
                        /* count down to i + 1: j is unsigned, so 'j >= i' would wrap when i == 0 */
                        for (j = biggest_idx; j > i; j--) {
                                memcpy(&pages[j], &pages[j-1], sizeof(struct rte_memseg));
                                memset(&pages[j-1], 0, sizeof(struct rte_memseg));
                        }
380
381                         /* put old biggest segment to its new place */
382                         memcpy(&pages[i], &tmp, sizeof(struct rte_memseg));
383                 }
384         }
385
386         /* third pass - write correct offsets */
387         for (mz_iter = 0; mz_iter < RTE_DIM(config->metadata->entry); mz_iter++) {
388
389                 uint64_t offset = 0;
390
391                 entry = &e_local[mz_iter];
392
393                 if (entry->mz.addr_64 == 0)
394                         break;
395
396                 /* find page for current memzone */
397                 for (i = 0; i < RTE_DIM(pages); i++) {
398                         /* we found our page */
399                         if (entry->mz.addr_64 >= pages[i].addr_64 &&
400                                         entry->mz.addr_64 < pages[i].addr_64 + pages[i].len) {
401                                 entry->offset = (entry->mz.addr_64 - pages[i].addr_64) +
402                                                 offset;
403                                 break;
404                         }
405                         offset += pages[i].len;
406                 }
407                 if (i == RTE_DIM(pages)) {
408                         RTE_LOG(ERR, EAL, "Page not found!\n");
409                         goto fail;
410                 }
411         }
412
413         ms_iter = 0;
414         prev_entry = NULL;
415
416         /* fourth pass - create proper memseg cache */
417         for (i = 0; i < RTE_DIM(pages) &&
418                         ms_iter <= RTE_DIM(config->memseg_cache); i++) {
419                 if (pages[i].addr_64 == 0)
420                         break;
421
422
                if (ms_iter == RTE_DIM(config->memseg_cache)) {
                        RTE_LOG(ERR, EAL, "Memseg cache is full!\n");
425                         goto fail;
426                 }
427
428                 c_entry = &ms_local[ms_iter];
429                 c_entry->len = pages[i].len;
430
431                 if (get_hugefile_by_virt_addr(pages[i].addr_64, c_entry) < 0)
432                         goto fail;
433
434                 /* if previous entry has the same filename and is contiguous,
435                  * clear current entry and increase previous entry's length
436                  */
437                 if (prev_entry != NULL &&
438                                 strncmp(c_entry->filepath, prev_entry->filepath,
439                                 sizeof(c_entry->filepath)) == 0 &&
440                                 prev_entry->offset + prev_entry->len == c_entry->offset) {
441                         prev_entry->len += pages[i].len;
442                         memset(c_entry, 0, sizeof(struct memseg_cache_entry));
443                 }
444                 else {
445                         prev_entry = c_entry;
446                         ms_iter++;
447                 }
448         }
449
450         /* update current configuration with new valid data */
451         memcpy(config->metadata->entry, e_local, sizeof(config->metadata->entry));
452         memcpy(config->memseg_cache, ms_local, sizeof(config->memseg_cache));
453
454         free(ms_local);
455         free(e_local);
456
457         return 0;
458 fail:
459         free(ms_local);
460 fail_ms:
461         free(e_local);
462 fail_e:
463         return -1;
464 }
465
466 static int
467 add_memzone_to_metadata(const struct rte_memzone * mz,
468                 struct ivshmem_config * config)
469 {
470         struct rte_ivshmem_metadata_entry * entry;
471         unsigned i;
472
473         rte_spinlock_lock(&config->sl);
474
475         /* find free slot in this config */
476         for (i = 0; i < RTE_DIM(config->metadata->entry); i++) {
477                 entry = &config->metadata->entry[i];
478
                if (entry->mz.addr_64 != 0 && overlap(mz, &entry->mz)) {
480                         RTE_LOG(ERR, EAL, "Overlapping memzones!\n");
481                         goto fail;
482                 }
483
                /* an addr of zero means this metadata slot is unused */
485                 if (entry->mz.addr_64 == 0) {
486                         RTE_LOG(DEBUG, EAL, "Adding memzone '%s' at %p to metadata %s\n",
487                                         mz->name, mz->addr, config->metadata->name);
488                         memcpy(&entry->mz, mz, sizeof(struct rte_memzone));
489
490                         /* run config file parser */
491                         if (build_config(config->metadata) < 0)
492                                 goto fail;
493
494                         break;
495                 }
496         }
497
498         /* if we reached the maximum, that means we have no place in config */
499         if (i == RTE_DIM(config->metadata->entry)) {
500                 RTE_LOG(ERR, EAL, "No space left in IVSHMEM metadata %s!\n",
501                                 config->metadata->name);
502                 goto fail;
503         }
504
505         rte_spinlock_unlock(&config->sl);
506         return 0;
507 fail:
508         rte_spinlock_unlock(&config->sl);
509         return -1;
510 }
511
512 static int
513 add_ring_to_metadata(const struct rte_ring * r,
514                 struct ivshmem_config * config)
515 {
516         struct rte_memzone * mz;
517
518         mz = get_memzone_by_addr(r);
519
520         if (!mz) {
521                 RTE_LOG(ERR, EAL, "Cannot find memzone for ring!\n");
522                 return -1;
523         }
524
525         return add_memzone_to_metadata(mz, config);
526 }
527
528 static int
529 add_mempool_to_metadata(const struct rte_mempool * mp,
530                 struct ivshmem_config * config)
531 {
532         struct rte_memzone * mz;
533         int ret;
534
535         mz = get_memzone_by_addr(mp);
536         ret = 0;
537
538         if (!mz) {
539                 RTE_LOG(ERR, EAL, "Cannot find memzone for mempool!\n");
540                 return -1;
541         }
542
543         /* mempool consists of memzone and ring */
544         ret = add_memzone_to_metadata(mz, config);
545         if (ret < 0)
546                 return -1;
547
548         return add_ring_to_metadata(mp->ring, config);
549 }
550
551 int
552 rte_ivshmem_metadata_add_ring(const struct rte_ring * r, const char * name)
553 {
554         struct ivshmem_config * config;
555
556         if (name == NULL || r == NULL)
557                 return -1;
558
559         config = get_config_by_name(name);
560
561         if (config == NULL) {
562                 RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", name);
563                 return -1;
564         }
565
566         return add_ring_to_metadata(r, config);
567 }
568
569 int
570 rte_ivshmem_metadata_add_memzone(const struct rte_memzone * mz, const char * name)
571 {
572         struct ivshmem_config * config;
573
574         if (name == NULL || mz == NULL)
575                 return -1;
576
577         config = get_config_by_name(name);
578
579         if (config == NULL) {
580                 RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", name);
581                 return -1;
582         }
583
584         return add_memzone_to_metadata(mz, config);
585 }
586
587 int
588 rte_ivshmem_metadata_add_mempool(const struct rte_mempool * mp, const char * name)
589 {
590         struct ivshmem_config * config;
591
592         if (name == NULL || mp == NULL)
593                 return -1;
594
595         config = get_config_by_name(name);
596
597         if (config == NULL) {
598                 RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", name);
599                 return -1;
600         }
601
602         return add_mempool_to_metadata(mp, config);
603 }
604
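/* e.g. metadata name "foo" maps to /var/run/.dpdk_ivshmem_metadata_foo */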
605 static inline void
606 ivshmem_config_path(char *buffer, size_t bufflen, const char *name)
607 {
608         rte_snprintf(buffer, bufflen, IVSHMEM_CONFIG_FILE_FMT, name);
609 }
610
611
612
613 static inline
614 void *ivshmem_metadata_create(const char *name, size_t size,
615                 struct flock *lock)
616 {
617         int retval, fd;
618         void *metadata_addr;
619         char pathname[PATH_MAX];
620
621         ivshmem_config_path(pathname, sizeof(pathname), name);
622
623         fd = open(pathname, O_RDWR | O_CREAT, 0660);
624         if (fd < 0) {
625                 RTE_LOG(ERR, EAL, "Cannot open '%s'\n", pathname);
626                 return NULL;
627         }
628
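        /* always map a page-aligned amount, regardless of the size requested */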
629         size = METADATA_SIZE_ALIGNED;
630
631         retval = fcntl(fd, F_SETLK, lock);
632         if (retval < 0){
633                 close(fd);
634                 RTE_LOG(ERR, EAL, "Cannot create lock on '%s'. Is another "
635                                 "process using it?\n", pathname);
636                 return NULL;
637         }
638
639         retval = ftruncate(fd, size);
640         if (retval < 0){
641                 close(fd);
642                 RTE_LOG(ERR, EAL, "Cannot resize '%s'\n", pathname);
643                 return NULL;
644         }
645
646         metadata_addr = mmap(NULL, size,
647                                 PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
648
649         if (metadata_addr == MAP_FAILED){
650                 RTE_LOG(ERR, EAL, "Cannot mmap memory for '%s'\n", pathname);
651
                /* we don't care if we can't unlock */
                lock->l_type = F_UNLCK;
                fcntl(fd, F_SETLK, lock);
654                 close(fd);
655
656                 return NULL;
657         }
658
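        /*
         * Note: fd is intentionally left open here; closing it would release
         * the fcntl() lock taken above.
         */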
659         return metadata_addr;
660 }
661
662 int rte_ivshmem_metadata_create(const char *name)
663 {
664         struct ivshmem_config * ivshmem_config;
665         unsigned index;
666
667         if (pagesz == 0)
668                 pagesz = getpagesize();
669
670         if (name == NULL)
671                 return -1;
672
673         rte_spinlock_lock(&global_cfg_sl);
674
675         for (index = 0; index < RTE_DIM(ivshmem_global_config); index++) {
676                 if (ivshmem_global_config[index].metadata == NULL) {
677                         ivshmem_config = &ivshmem_global_config[index];
678                         break;
679                 }
680         }
681
682         if (index == RTE_DIM(ivshmem_global_config)) {
683                 RTE_LOG(ERR, EAL, "Cannot create more ivshmem config files. "
684                 "Maximum has been reached\n");
685                 rte_spinlock_unlock(&global_cfg_sl);
686                 return -1;
687         }
688
689         ivshmem_config->lock.l_type = F_WRLCK;
690         ivshmem_config->lock.l_whence = SEEK_SET;
691
692         ivshmem_config->lock.l_start = 0;
693         ivshmem_config->lock.l_len = METADATA_SIZE_ALIGNED;
694
695         ivshmem_global_config[index].metadata = ((struct rte_ivshmem_metadata *)
696                         ivshmem_metadata_create(
697                                         name,
698                                         sizeof(struct rte_ivshmem_metadata),
699                                         &ivshmem_config->lock));
700
701         if (ivshmem_global_config[index].metadata == NULL) {
702                 rte_spinlock_unlock(&global_cfg_sl);
703                 return -1;
704         }
705
706         /* Metadata setup */
707         memset(ivshmem_config->metadata, 0, sizeof(struct rte_ivshmem_metadata));
708         ivshmem_config->metadata->magic_number = IVSHMEM_MAGIC;
709         rte_snprintf(ivshmem_config->metadata->name,
710                         sizeof(ivshmem_config->metadata->name), "%s", name);
711
712         rte_spinlock_unlock(&global_cfg_sl);
713
714         return 0;
715 }
716
717 int
718 rte_ivshmem_metadata_cmdline_generate(char *buffer, unsigned size, const char *name)
719 {
720         const struct memseg_cache_entry * ms_cache, *entry;
721         struct ivshmem_config * config;
722         char cmdline[IVSHMEM_QEMU_CMDLINE_BUFSIZE], *cmdline_ptr;
723         char cfg_file_path[PATH_MAX];
724         unsigned remaining_len, tmplen, iter;
725         uint64_t shared_mem_size, zero_size, total_size;
726
727         if (buffer == NULL || name == NULL)
728                 return -1;
729
730         config = get_config_by_name(name);
731
732         if (config == NULL) {
733                 RTE_LOG(ERR, EAL, "Config %s not found!\n", name);
734                 return -1;
735         }
736
737         rte_spinlock_lock(&config->sl);
738
739         /* prepare metadata file path */
740         rte_snprintf(cfg_file_path, sizeof(cfg_file_path), IVSHMEM_CONFIG_FILE_FMT,
741                         config->metadata->name);
742
743         ms_cache = config->memseg_cache;
744
745         cmdline_ptr = cmdline;
746         remaining_len = sizeof(cmdline);
747
748         shared_mem_size = 0;
749         iter = 0;
750
        /* bound the index by the cache size and check it before dereferencing */
        while ((iter < RTE_DIM(config->memseg_cache)) && (ms_cache[iter].len != 0)) {
752
753                 entry = &ms_cache[iter];
754
755                 /* Offset and sizes within the current pathname */
756                 tmplen = rte_snprintf(cmdline_ptr, remaining_len, IVSHMEM_QEMU_CMD_FD_FMT,
757                                 entry->filepath, entry->offset, entry->len);
758
759                 shared_mem_size += entry->len;
760
761                 cmdline_ptr = RTE_PTR_ADD(cmdline_ptr, tmplen);
762                 remaining_len -= tmplen;
763
764                 if (remaining_len == 0) {
765                         RTE_LOG(ERR, EAL, "Command line too long!\n");
766                         rte_spinlock_unlock(&config->sl);
767                         return -1;
768                 }
769
770                 iter++;
771         }
772
773         total_size = rte_align64pow2(shared_mem_size + METADATA_SIZE_ALIGNED);
774         zero_size = total_size - shared_mem_size - METADATA_SIZE_ALIGNED;
775
776         /* add /dev/zero to command-line to fill the space */
777         tmplen = rte_snprintf(cmdline_ptr, remaining_len, IVSHMEM_QEMU_CMD_FD_FMT,
778                         "/dev/zero",
779                         0x0,
780                         zero_size);
781
782         cmdline_ptr = RTE_PTR_ADD(cmdline_ptr, tmplen);
783         remaining_len -= tmplen;
784
785         if (remaining_len == 0) {
786                 RTE_LOG(ERR, EAL, "Command line too long!\n");
787                 rte_spinlock_unlock(&config->sl);
788                 return -1;
789         }
790
791         /* add metadata file to the end of command-line */
792         tmplen = rte_snprintf(cmdline_ptr, remaining_len, IVSHMEM_QEMU_CMD_FD_FMT,
793                         cfg_file_path,
794                         0x0,
795                         METADATA_SIZE_ALIGNED);
796
797         cmdline_ptr = RTE_PTR_ADD(cmdline_ptr, tmplen);
798         remaining_len -= tmplen;
799
800         if (remaining_len == 0) {
801                 RTE_LOG(ERR, EAL, "Command line too long!\n");
802                 rte_spinlock_unlock(&config->sl);
803                 return -1;
804         }
805
        /* fail if the assembled command line does not fit into the buffer
         * supplied by the user */
808         if ((sizeof(cmdline) - remaining_len) > size) {
809                 RTE_LOG(ERR, EAL, "Buffer is too short!\n");
810                 rte_spinlock_unlock(&config->sl);
811                 return -1;
812         }
813         /* complete the command-line */
814         rte_snprintf(buffer, size,
815                         IVSHMEM_QEMU_CMD_LINE_HEADER_FMT,
816                         total_size >> 20,
817                         cmdline);
818
819         rte_spinlock_unlock(&config->sl);
820
821         return 0;
822 }
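/*
 * Typical host-side usage, as a minimal sketch (the ring name, metadata name
 * and buffer size below are examples only, and error checking is omitted):
 *
 *   char cmdline[1024];
 *   struct rte_ring *r = rte_ring_create("ivshm_r0", 256, SOCKET_ID_ANY, 0);
 *
 *   rte_ivshmem_metadata_create("md_0");
 *   rte_ivshmem_metadata_add_ring(r, "md_0");
 *   rte_ivshmem_metadata_cmdline_generate(cmdline, sizeof(cmdline), "md_0");
 *
 * The resulting string is then appended to the QEMU command line of the
 * guest that is to see the shared memory.
 */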
823
824 void
825 rte_ivshmem_metadata_dump(FILE *f, const char *name)
826 {
827         unsigned i = 0;
828         struct ivshmem_config * config;
829         struct rte_ivshmem_metadata_entry *entry;
830 #ifdef RTE_LIBRTE_IVSHMEM_DEBUG
831         uint64_t addr;
832         uint64_t end, hugepage_sz;
833         struct memseg_cache_entry e;
834 #endif
835
836         if (name == NULL)
837                 return;
838
839         /* return error if we try to use an unknown config file */
840         config = get_config_by_name(name);
841         if (config == NULL) {
842                 RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", name);
843                 return;
844         }
845
846         rte_spinlock_lock(&config->sl);
847
848         entry = &config->metadata->entry[0];
849
850         while (entry->mz.addr != NULL && i < RTE_DIM(config->metadata->entry)) {
851
                fprintf(f, "Entry %u: name:<%-20s>, phys:0x%-15"PRIx64", len:0x%-15zx, "
                        "virt:%-15p, off:0x%-15"PRIx64"\n",
854                         i,
855                         entry->mz.name,
856                         entry->mz.phys_addr,
857                         entry->mz.len,
858                         entry->mz.addr,
859                         entry->offset);
860                 i++;
861
862 #ifdef RTE_LIBRTE_IVSHMEM_DEBUG
863                 fprintf(f, "\tHugepage files:\n");
864
865                 hugepage_sz = entry->mz.hugepage_sz;
866                 addr = RTE_ALIGN_FLOOR(entry->mz.addr_64, hugepage_sz);
867                 end = addr + RTE_ALIGN_CEIL(entry->mz.len + (entry->mz.addr_64 - addr),
868                                 hugepage_sz);
869
870                 for (; addr < end; addr += hugepage_sz) {
871                         memset(&e, 0, sizeof(e));
872
873                         get_hugefile_by_virt_addr(addr, &e);
874
875                         fprintf(f, "\t0x%"PRIx64 "-0x%" PRIx64 " offset: 0x%" PRIx64 " %s\n",
876                                         addr, addr + hugepage_sz, e.offset, e.filepath);
877                 }
878 #endif
879                 entry++;
880         }
881
882         rte_spinlock_unlock(&config->sl);
883 }