/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef RTE_LIBRTE_IVSHMEM /* hide it from coverage */

#include <fcntl.h>
#include <limits.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <sys/file.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_eal_memconfig.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_ivshmem.h>
#include <rte_tailq_elem.h>

#include "eal_internal_cfg.h"
#include "eal_private.h"

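/*
 * EAL support for QEMU IVSHMEM (inter-VM shared memory). This file scans the
 * PCI bus for ivshmem devices whose BAR2 carries DPDK-formatted metadata,
 * maps the shared memory they expose into this process as DPDK
 * memsegs/memzones, and (in rte_eal_ivshmem_obj_init) registers any rte_ring
 * objects found in that memory with the EAL ring tailq.
 */
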
#define PCI_VENDOR_ID_IVSHMEM 0x1Af4
#define PCI_DEVICE_ID_IVSHMEM 0x1110

#define IVSHMEM_MAGIC 0x0BADC0DE
#define IVSHMEM_METADATA_SIZE 0x1000

#define IVSHMEM_RESOURCE_PATH "/sys/bus/pci/devices/%04x:%02x:%02x.%x/resource2"
#define IVSHMEM_CONFIG_PATH "/var/run/.%s_ivshmem_config"

/* bit flags returned by overlap()/adjacent(); the exact values are not
 * significant, they only need to be distinct bits */
#define PHYS 0x1
#define VIRT 0x2
#define IOREMAP 0x4
#define FULL (PHYS|VIRT|IOREMAP)

#define METADATA_SIZE_ALIGNED \
	(RTE_ALIGN_CEIL(sizeof(struct rte_ivshmem_metadata),pagesz))

#define CONTAINS(x,y)\
	(((y).addr_64 >= (x).addr_64) && ((y).addr_64 < (x).addr_64 + (x).len))

#define DIM(x) (sizeof(x)/sizeof(x[0]))

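/*
 * Note: IVSHMEM_CONFIG_PATH embeds internal_config.hugefile_prefix, so DPDK
 * instances started with different --file-prefix values keep separate ivshmem
 * config files and do not interfere with each other.
 */
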
struct ivshmem_pci_device {
	char path[PATH_MAX];
	phys_addr_t ioremap_addr;
};

/* data type to store in config */
struct ivshmem_segment {
	struct rte_ivshmem_metadata_entry entry;
	uint64_t align;
	char path[PATH_MAX];
};

struct ivshmem_shared_config {
	struct ivshmem_segment segment[RTE_MAX_MEMSEG];
	uint32_t segment_idx;
	struct ivshmem_pci_device pci_devs[RTE_LIBRTE_IVSHMEM_MAX_PCI_DEVS];
	uint32_t pci_devs_idx;
};

static struct ivshmem_shared_config * ivshmem_config;
static int memseg_idx;
static int pagesz;

/* Tailq heads to add rings to */
TAILQ_HEAD(rte_ring_list, rte_ring);

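/*
 * This mirrors the ring tailq element type used by librte_ring; rings found
 * in IVSHMEM memory are appended to the EAL ring tailq in
 * rte_eal_ivshmem_obj_init() below so that rte_ring_lookup() can find them.
 */
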
static int
is_ivshmem_device(struct rte_pci_device * dev)
{
	return (dev->id.vendor_id == PCI_VENDOR_ID_IVSHMEM
			&& dev->id.device_id == PCI_DEVICE_ID_IVSHMEM);
}

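/*
 * The DPDK metadata block is kept in the last METADATA_SIZE_ALIGNED bytes of
 * the device's BAR2; map_metadata() maps just that window (offset
 * len - METADATA_SIZE_ALIGNED), so the magic number and entry table can be
 * read without mapping the whole BAR.
 */
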
static void *
map_metadata(int fd, uint64_t len)
{
	size_t metadata_len = sizeof(struct rte_ivshmem_metadata);
	size_t aligned_len = METADATA_SIZE_ALIGNED;

	return mmap(NULL, metadata_len, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, len - aligned_len);
}

static void
unmap_metadata(void * ptr)
{
	munmap(ptr, sizeof(struct rte_ivshmem_metadata));
}

static int
has_ivshmem_metadata(int fd, uint64_t len)
{
	struct rte_ivshmem_metadata metadata;
	void * ptr;

	ptr = map_metadata(fd, len);
	if (ptr == MAP_FAILED)
		return -1;

	metadata = *(struct rte_ivshmem_metadata*) (ptr);

	unmap_metadata(ptr);

	return metadata.magic_number == IVSHMEM_MAGIC;
}

static void
remove_segment(struct ivshmem_segment * ms, int len, int idx)
{
	int i;

	for (i = idx; i < len - 1; i++)
		memcpy(&ms[i], &ms[i+1], sizeof(struct ivshmem_segment));
	memset(&ms[len-1], 0, sizeof(struct ivshmem_segment));
}

static int
overlap(const struct rte_memzone * mz1, const struct rte_memzone * mz2)
{
	uint64_t start1, end1, start2, end2;
	uint64_t p_start1, p_end1, p_start2, p_end2;
	uint64_t i_start1, i_end1, i_start2, i_end2;
	int result = 0;

	/* gather virtual addresses */
	start1 = mz1->addr_64;
	end1 = mz1->addr_64 + mz1->len;
	start2 = mz2->addr_64;
	end2 = mz2->addr_64 + mz2->len;

	/* gather physical addresses */
	p_start1 = mz1->phys_addr;
	p_end1 = mz1->phys_addr + mz1->len;
	p_start2 = mz2->phys_addr;
	p_end2 = mz2->phys_addr + mz2->len;

	/* gather ioremap addresses */
	i_start1 = mz1->ioremap_addr;
	i_end1 = mz1->ioremap_addr + mz1->len;
	i_start2 = mz2->ioremap_addr;
	i_end2 = mz2->ioremap_addr + mz2->len;

	/* check for overlap in virtual addresses */
	if (start1 >= start2 && start1 < end2)
		result |= VIRT;
	if (start2 >= start1 && start2 < end1)
		result |= VIRT;

	/* check for overlap in physical addresses */
	if (p_start1 >= p_start2 && p_start1 < p_end2)
		result |= PHYS;
	if (p_start2 >= p_start1 && p_start2 < p_end1)
		result |= PHYS;

	/* check for overlap in ioremap addresses */
	if (i_start1 >= i_start2 && i_start1 < i_end2)
		result |= IOREMAP;
	if (i_start2 >= i_start1 && i_start2 < i_end1)
		result |= IOREMAP;

	return result;
}

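/*
 * overlap() and adjacent() return a bitmask of PHYS/VIRT/IOREMAP flags. For
 * example (illustrative addresses): if mz1 spans virtual 0x1000-0x3000 and
 * mz2 spans virtual 0x2000-0x4000, VIRT is set; if their physical and ioremap
 * ranges also intersect, the result equals FULL.
 */
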
static int
adjacent(const struct rte_memzone * mz1, const struct rte_memzone * mz2)
{
	uint64_t start1, end1, start2, end2;
	uint64_t p_start1, p_end1, p_start2, p_end2;
	uint64_t i_start1, i_end1, i_start2, i_end2;
	int result = 0;

	/* gather virtual addresses */
	start1 = mz1->addr_64;
	end1 = mz1->addr_64 + mz1->len;
	start2 = mz2->addr_64;
	end2 = mz2->addr_64 + mz2->len;

	/* gather physical addresses */
	p_start1 = mz1->phys_addr;
	p_end1 = mz1->phys_addr + mz1->len;
	p_start2 = mz2->phys_addr;
	p_end2 = mz2->phys_addr + mz2->len;

	/* gather ioremap addresses */
	i_start1 = mz1->ioremap_addr;
	i_end1 = mz1->ioremap_addr + mz1->len;
	i_start2 = mz2->ioremap_addr;
	i_end2 = mz2->ioremap_addr + mz2->len;

	/* check if segments are virtually adjacent */
	if (start1 == end2)
		result |= VIRT;
	if (start2 == end1)
		result |= VIRT;

	/* check if segments are physically adjacent */
	if (p_start1 == p_end2)
		result |= PHYS;
	if (p_start2 == p_end1)
		result |= PHYS;

	/* check if segments are ioremap-adjacent */
	if (i_start1 == i_end2)
		result |= IOREMAP;
	if (i_start2 == i_end1)
		result |= IOREMAP;

	return result;
}

static int
has_adjacent_segments(struct ivshmem_segment * ms, int len)
{
	int i, j, a;

	for (i = 0; i < len; i++)
		for (j = i + 1; j < len; j++) {
			a = adjacent(&ms[i].entry.mz, &ms[j].entry.mz);

			/* check if segments are adjacent virtually and/or physically but
			 * not ioremap (since that would indicate that they are from
			 * different PCI devices and thus don't need to be concatenated).
			 */
			if ((a & (VIRT|PHYS)) > 0 && (a & IOREMAP) == 0)
				return 1;
		}
	return 0;
}

static int
has_overlapping_segments(struct ivshmem_segment * ms, int len)
{
	int i, j;

	for (i = 0; i < len; i++)
		for (j = i + 1; j < len; j++)
			if (overlap(&ms[i].entry.mz, &ms[j].entry.mz))
				return 1;
	return 0;
}

static int
seg_compare(const void * a, const void * b)
{
	const struct ivshmem_segment * s1 = (const struct ivshmem_segment*) a;
	const struct ivshmem_segment * s2 = (const struct ivshmem_segment*) b;

	/* move unallocated zones to the end */
	if (s1->entry.mz.addr == NULL && s2->entry.mz.addr == NULL)
		return 0;
	if (s1->entry.mz.addr == NULL)
		return 1;
	if (s2->entry.mz.addr == NULL)
		return -1;

	return s1->entry.mz.phys_addr > s2->entry.mz.phys_addr;
}

#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
static void
entry_dump(struct rte_ivshmem_metadata_entry *e)
{
	RTE_LOG(DEBUG, EAL, "\tvirt: %p-%p\n", e->mz.addr,
			RTE_PTR_ADD(e->mz.addr, e->mz.len));
	RTE_LOG(DEBUG, EAL, "\tphys: 0x%" PRIx64 "-0x%" PRIx64 "\n",
			e->mz.phys_addr,
			e->mz.phys_addr + e->mz.len);
	RTE_LOG(DEBUG, EAL, "\tio: 0x%" PRIx64 "-0x%" PRIx64 "\n",
			e->mz.ioremap_addr,
			e->mz.ioremap_addr + e->mz.len);
	RTE_LOG(DEBUG, EAL, "\tlen: 0x%" PRIx64 "\n", e->mz.len);
	RTE_LOG(DEBUG, EAL, "\toff: 0x%" PRIx64 "\n", e->offset);
}
#endif

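/*
 * Layout of struct rte_ivshmem_metadata as relied upon by read_metadata()
 * below (see rte_ivshmem.h for the authoritative definition): a name string,
 * a magic_number that must equal IVSHMEM_MAGIC, and an array of up to
 * RTE_LIBRTE_IVSHMEM_MAX_ENTRIES entries, each holding a struct rte_memzone
 * plus that zone's offset within the device BAR.
 */
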
/* read through metadata mapped from the IVSHMEM device */
static int
read_metadata(char * path, int path_len, int fd, uint64_t flen)
{
	struct rte_ivshmem_metadata metadata;
	struct rte_ivshmem_metadata_entry * entry;
	int idx, i;
	void * ptr;

	ptr = map_metadata(fd, flen);
	if (ptr == MAP_FAILED)
		return -1;

	metadata = *(struct rte_ivshmem_metadata*) (ptr);

	unmap_metadata(ptr);

	RTE_LOG(DEBUG, EAL, "Parsing metadata for \"%s\"\n", metadata.name);

	idx = ivshmem_config->segment_idx;

	for (i = 0; i < RTE_LIBRTE_IVSHMEM_MAX_ENTRIES &&
		idx <= RTE_MAX_MEMSEG; i++) {

		if (idx == RTE_MAX_MEMSEG) {
			RTE_LOG(ERR, EAL, "Not enough memory segments!\n");
			return -1;
		}

		entry = &metadata.entry[i];

		/* stop on uninitialized memzone */
		if (entry->mz.len == 0)
			break;

		/* copy metadata entry */
		memcpy(&ivshmem_config->segment[idx].entry, entry,
				sizeof(struct rte_ivshmem_metadata_entry));

		/* copy path */
		rte_snprintf(ivshmem_config->segment[idx].path, path_len, "%s", path);

		idx++;
	}
	ivshmem_config->segment_idx = idx;

	return 0;
}

/* check through each segment and look for adjacent or overlapping ones. */
static int
cleanup_segments(struct ivshmem_segment * ms, int tbl_len)
{
	struct ivshmem_segment * s, * tmp;
	int i, j, concat, seg_adjacent, seg_overlapping;
	uint64_t start1, start2, end1, end2, p_start1, p_start2, i_start1, i_start2;

	qsort(ms, tbl_len, sizeof(struct ivshmem_segment),
			seg_compare);

	while (has_overlapping_segments(ms, tbl_len) ||
			has_adjacent_segments(ms, tbl_len)) {

		for (i = 0; i < tbl_len; i++) {
			s = &ms[i];

			concat = 0;

			for (j = i + 1; j < tbl_len; j++) {
				tmp = &ms[j];

				/* check if this segment is overlapping with existing segment,
				 * or is adjacent to existing segment */
				seg_overlapping = overlap(&s->entry.mz, &tmp->entry.mz);
				seg_adjacent = adjacent(&s->entry.mz, &tmp->entry.mz);

				/* check if segments fully overlap or are fully adjacent */
				if ((seg_adjacent == FULL) || (seg_overlapping == FULL)) {
#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
					RTE_LOG(DEBUG, EAL, "Concatenating segments\n");
					RTE_LOG(DEBUG, EAL, "Segment %i:\n", i);
					entry_dump(&s->entry);
					RTE_LOG(DEBUG, EAL, "Segment %i:\n", j);
					entry_dump(&tmp->entry);
#endif

					start1 = s->entry.mz.addr_64;
					start2 = tmp->entry.mz.addr_64;
					p_start1 = s->entry.mz.phys_addr;
					p_start2 = tmp->entry.mz.phys_addr;
					i_start1 = s->entry.mz.ioremap_addr;
					i_start2 = tmp->entry.mz.ioremap_addr;
					end1 = s->entry.mz.addr_64 + s->entry.mz.len;
					end2 = tmp->entry.mz.addr_64 + tmp->entry.mz.len;

					/* settle for minimum start address and maximum length */
					s->entry.mz.addr_64 = RTE_MIN(start1, start2);
					s->entry.mz.phys_addr = RTE_MIN(p_start1, p_start2);
					s->entry.mz.ioremap_addr = RTE_MIN(i_start1, i_start2);
					s->entry.offset = RTE_MIN(s->entry.offset, tmp->entry.offset);
					s->entry.mz.len = RTE_MAX(end1, end2) - s->entry.mz.addr_64;

					concat = 1;

#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
					RTE_LOG(DEBUG, EAL, "Resulting segment:\n");
					entry_dump(&s->entry);
#endif
				}
				/* if segments do not fully overlap, we have an error condition.
				 * adjacent segments can coexist.
				 */
				else if (seg_overlapping > 0) {
					RTE_LOG(ERR, EAL, "Segments %i and %i overlap!\n", i, j);
#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
					RTE_LOG(DEBUG, EAL, "Segment %i:\n", i);
					entry_dump(&s->entry);
					RTE_LOG(DEBUG, EAL, "Segment %i:\n", j);
					entry_dump(&tmp->entry);
#endif
					return -1;
				}
				if (concat)
					break;
			}
			/* if we concatenated, remove segment at j */
			if (concat)
				remove_segment(ms, tbl_len, j);
		}
	}

	/* return the number of remaining (non-empty) segments */
	for (i = 0; i < tbl_len; i++)
		if (ms[i].entry.mz.len == 0)
			break;

	return i;
}

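/*
 * Example of the concatenation performed by cleanup_segments() above
 * (illustrative addresses): a segment spanning virtual 0x1000-0x2000 and one
 * spanning 0x2000-0x3000 that are also physically and ioremap-adjacent
 * (i.e. adjacent() == FULL) are merged into a single 0x1000-0x3000 entry
 * whose offset is the smaller of the two offsets.
 */
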
static int
create_shared_config(void)
{
	char path[PATH_MAX];
	int fd;

	/* build ivshmem config file path */
	rte_snprintf(path, sizeof(path), IVSHMEM_CONFIG_PATH,
			internal_config.hugefile_prefix);

	fd = open(path, O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "Could not open %s: %s\n", path, strerror(errno));
		return -1;
	}

	/* try ex-locking first - if the file is locked, we have a problem */
	if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
		RTE_LOG(ERR, EAL, "Locking %s failed: %s\n", path, strerror(errno));
		return -1;
	}

	if (ftruncate(fd, sizeof(struct ivshmem_shared_config)) < 0) {
		RTE_LOG(ERR, EAL, "ftruncate failed: %s\n", strerror(errno));
		return -1;
	}

	ivshmem_config = mmap(NULL, sizeof(struct ivshmem_shared_config),
			PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ivshmem_config == MAP_FAILED)
		return -1;

	memset(ivshmem_config, 0, sizeof(struct ivshmem_shared_config));

	/* change the exclusive lock we got earlier to a shared lock */
	if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
		RTE_LOG(ERR, EAL, "Locking %s failed: %s\n", path, strerror(errno));
		return -1;
	}

	return 0;
}

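/*
 * Lock protocol for the ivshmem config file: the primary process takes an
 * exclusive flock() while creating and zeroing the file, then downgrades it
 * to a shared lock. A process that later manages to take an exclusive lock
 * therefore knows the file is stale and removes it (see open_shared_config()
 * below).
 */
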
/* open shared config file and, if present, map the config.
 * having no config file is not an error condition, as we later check if
 * ivshmem_config is NULL (if it is, that means nothing was mapped). */
static int
open_shared_config(void)
{
	char path[PATH_MAX];
	int fd;

	/* build ivshmem config file path */
	rte_snprintf(path, sizeof(path), IVSHMEM_CONFIG_PATH,
			internal_config.hugefile_prefix);

	fd = open(path, O_RDONLY);

	/* if the file doesn't exist, just return success */
	if (fd < 0 && errno == ENOENT)
		return 0;
	/* else we have an error condition */
	else if (fd < 0) {
		RTE_LOG(ERR, EAL, "Could not open %s: %s\n",
				path, strerror(errno));
		return -1;
	}

	/* try ex-locking first - if the lock *does* succeed, this means it's a
	 * stray config file, so it should be deleted.
	 */
	if (flock(fd, LOCK_EX | LOCK_NB) != -1) {

		/* if we can't remove the file, something is wrong */
		if (unlink(path) < 0) {
			RTE_LOG(ERR, EAL, "Could not remove %s: %s\n", path,
					strerror(errno));
			return -1;
		}

		/* release the lock */
		flock(fd, LOCK_UN);
		close(fd);

		/* return success as having a stray config file is equivalent to not
		 * having config file at all.
		 */
		return 0;
	}

	ivshmem_config = mmap(NULL, sizeof(struct ivshmem_shared_config),
			PROT_READ, MAP_SHARED, fd, 0);
	if (ivshmem_config == MAP_FAILED)
		return -1;

	/* place a shared lock on config file */
	if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
		RTE_LOG(ERR, EAL, "Locking %s failed: %s\n", path, strerror(errno));
		return -1;
	}

	return 0;
}

/*
 * This function does the following:
 *
 * 1) Builds a table of ivshmem_segments with proper offset alignment
 * 2) Cleans up that table so that we don't have any overlapping or adjacent
 *    memory segments
 * 3) Creates memsegs from this table and maps them into memory.
 */
static int
map_all_segments(void)
{
	struct ivshmem_segment ms_tbl[RTE_MAX_MEMSEG];
	struct ivshmem_pci_device * pci_dev;
	struct rte_mem_config * mcfg;
	struct ivshmem_segment * seg;
	int fd, fd_zero;
	unsigned i, j;
	struct rte_memzone mz;
	struct rte_memseg ms;
	void * base_addr;
	uint64_t align, len;
	phys_addr_t ioremap_addr;

	ioremap_addr = 0;

	memset(ms_tbl, 0, sizeof(ms_tbl));
	memset(&mz, 0, sizeof(struct rte_memzone));
	memset(&ms, 0, sizeof(struct rte_memseg));

	/* first, build a table of memsegs to map, to avoid failed mmaps due to
	 * overlapping or adjacent segments */
	for (i = 0; i < ivshmem_config->segment_idx && i <= RTE_MAX_MEMSEG; i++) {
		if (i == RTE_MAX_MEMSEG) {
			RTE_LOG(ERR, EAL, "Too many segments requested!\n");
			return -1;
		}

		seg = &ivshmem_config->segment[i];

		/* copy segment to table */
		memcpy(&ms_tbl[i], seg, sizeof(struct ivshmem_segment));

		/* find ioremap addr */
		for (j = 0; j < DIM(ivshmem_config->pci_devs); j++) {
			pci_dev = &ivshmem_config->pci_devs[j];
			if (!strncmp(pci_dev->path, seg->path, sizeof(pci_dev->path))) {
				ioremap_addr = pci_dev->ioremap_addr;
				break;
			}
		}
		if (ioremap_addr == 0) {
			RTE_LOG(ERR, EAL, "Cannot find ioremap addr!\n");
			return -1;
		}

		/* work out alignments */
		align = seg->entry.mz.addr_64 -
				RTE_ALIGN_FLOOR(seg->entry.mz.addr_64, 0x1000);
		len = RTE_ALIGN_CEIL(seg->entry.mz.len + align, 0x1000);

		/* save original alignments */
		ms_tbl[i].align = align;

		/* create a memory zone */
		mz.addr_64 = seg->entry.mz.addr_64 - align;
		mz.len = len;
		mz.hugepage_sz = seg->entry.mz.hugepage_sz;
		mz.phys_addr = seg->entry.mz.phys_addr - align;

		/* find true physical address */
		mz.ioremap_addr = ioremap_addr + seg->entry.offset - align;

		ms_tbl[i].entry.offset = seg->entry.offset - align;

		memcpy(&ms_tbl[i].entry.mz, &mz, sizeof(struct rte_memzone));
	}

	/* clean up the segments */
	memseg_idx = cleanup_segments(ms_tbl, ivshmem_config->segment_idx);
	if (memseg_idx < 0)
		return -1;

	mcfg = rte_eal_get_configuration()->mem_config;

	fd_zero = open("/dev/zero", O_RDWR);
	if (fd_zero < 0) {
		RTE_LOG(ERR, EAL, "Cannot open /dev/zero: %s\n", strerror(errno));
		return -1;
	}

	/* create memsegs and put them into DPDK memory */
	for (i = 0; i < (unsigned) memseg_idx; i++) {

		seg = &ms_tbl[i];

		ms.addr_64 = seg->entry.mz.addr_64;
		ms.hugepage_sz = seg->entry.mz.hugepage_sz;
		ms.len = seg->entry.mz.len;
		ms.nchannel = rte_memory_get_nchannel();
		ms.nrank = rte_memory_get_nrank();
		ms.phys_addr = seg->entry.mz.phys_addr;
		ms.ioremap_addr = seg->entry.mz.ioremap_addr;
		ms.socket_id = seg->entry.mz.socket_id;

		/* first claim the address range via /dev/zero to make sure it's free */
		base_addr = mmap(ms.addr, ms.len,
				PROT_READ | PROT_WRITE, MAP_PRIVATE, fd_zero, 0);
		if (base_addr == MAP_FAILED || base_addr != ms.addr) {
			RTE_LOG(ERR, EAL, "Cannot map /dev/zero!\n");
			return -1;
		}

		fd = open(seg->path, O_RDWR);
		if (fd < 0) {
			RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", seg->path,
					strerror(errno));
			return -1;
		}

		munmap(ms.addr, ms.len);

		base_addr = mmap(ms.addr, ms.len,
				PROT_READ | PROT_WRITE, MAP_SHARED, fd,
				seg->entry.offset);

		if (base_addr == MAP_FAILED || base_addr != ms.addr) {
			RTE_LOG(ERR, EAL, "Cannot map segment into memory: "
					"expected %p got %p (%s)\n", ms.addr, base_addr,
					strerror(errno));
			return -1;
		}

		RTE_LOG(DEBUG, EAL, "Memory segment mapped: %p (len %" PRIx64 ") at "
				"offset 0x%" PRIx64 "\n",
				ms.addr, ms.len, seg->entry.offset);

		close(fd);

		/* put the pointers back into their real positions using original
		 * alignment */
		ms.addr_64 += seg->align;
		ms.phys_addr += seg->align;
		ms.ioremap_addr += seg->align;
		ms.len -= seg->align;

		/* at this point, the rest of DPDK memory is not initialized, so we
		 * expect memsegs to be empty */
		memcpy(&mcfg->memseg[i], &ms,
				sizeof(struct rte_memseg));
		memcpy(&mcfg->free_memseg[i], &ms,
				sizeof(struct rte_memseg));

		/* adjust the free_memseg so that there's no free space left */
		mcfg->free_memseg[i].ioremap_addr += mcfg->free_memseg[i].len;
		mcfg->free_memseg[i].phys_addr += mcfg->free_memseg[i].len;
		mcfg->free_memseg[i].addr_64 += mcfg->free_memseg[i].len;
		mcfg->free_memseg[i].len = 0;

		RTE_LOG(DEBUG, EAL, "IVSHMEM segment found, size: 0x%lx\n",
				(unsigned long) ms.len);
	}

	close(fd_zero);

	return 0;
}

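/*
 * Note on the mapping above: the metadata records the virtual addresses the
 * memzones had in the host application, and objects such as rte_rings store
 * pointers based on those addresses. The mapping is therefore only usable if
 * mmap() returns exactly ms.addr, which is why base_addr != ms.addr is
 * treated as a fatal error.
 */
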
/* this happens at a later stage, after general EAL memory initialization */
int
rte_eal_ivshmem_obj_init(void)
{
	struct rte_ring_list* ring_list = NULL;
	struct rte_mem_config * mcfg;
	struct ivshmem_segment * seg;
	struct rte_memzone * mz;
	struct rte_ring * r;
	unsigned i, ms, idx;
	uint64_t offset;

	/* secondary process would not need any object discovery - it'll all
	 * already be in shared config */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY || ivshmem_config == NULL)
		return 0;

	/* check that we have an initialised ring tail queue */
	if ((ring_list =
			RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_RING, rte_ring_list)) == NULL) {
		RTE_LOG(ERR, EAL, "No rte_ring tailq found!\n");
		return -1;
	}

	mcfg = rte_eal_get_configuration()->mem_config;

	/* create memzones */
	for (i = 0; i < ivshmem_config->segment_idx && i <= RTE_MAX_MEMZONE; i++) {

		seg = &ivshmem_config->segment[i];

		/* add memzone */
		if (mcfg->memzone_idx == RTE_MAX_MEMZONE) {
			RTE_LOG(ERR, EAL, "No more memory zones available!\n");
			return -1;
		}

		idx = mcfg->memzone_idx;

		RTE_LOG(DEBUG, EAL, "Found memzone: '%s' at %p (len 0x%" PRIx64 ")\n",
				seg->entry.mz.name, seg->entry.mz.addr, seg->entry.mz.len);

		memcpy(&mcfg->memzone[idx], &seg->entry.mz,
				sizeof(struct rte_memzone));

		/* find ioremap address */
		for (ms = 0; ms <= RTE_MAX_MEMSEG; ms++) {
			if (ms == RTE_MAX_MEMSEG) {
				RTE_LOG(ERR, EAL, "Physical address of segment not found!\n");
				return -1;
			}
			if (CONTAINS(mcfg->memseg[ms], mcfg->memzone[idx])) {
				offset = mcfg->memzone[idx].addr_64 -
						mcfg->memseg[ms].addr_64;
				mcfg->memzone[idx].ioremap_addr = mcfg->memseg[ms].ioremap_addr +
						offset;
				break;
			}
		}

		mcfg->memzone_idx++;
	}

	/* find rings */
	for (i = 0; i < mcfg->memzone_idx; i++) {
		mz = &mcfg->memzone[i];

		/* check if memzone has a ring prefix */
		if (strncmp(mz->name, RTE_RING_MZ_PREFIX,
				sizeof(RTE_RING_MZ_PREFIX) - 1) != 0)
			continue;

		r = (struct rte_ring*) (mz->addr_64);

		TAILQ_INSERT_TAIL(ring_list, r, next);

		RTE_LOG(DEBUG, EAL, "Found ring: '%s' at %p\n", r->name, mz->addr);
	}

#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
	rte_ring_list_dump();
#endif

	return 0;
}

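/*
 * After rte_eal_ivshmem_obj_init() has run (as part of EAL init), the guest
 * application can attach to a shared ring through the normal lookup API,
 * e.g. (hypothetical ring name):
 *
 *	struct rte_ring *r = rte_ring_lookup("shared_ring");
 *	if (r != NULL)
 *		rte_ring_enqueue(r, obj);
 */
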
/* initialize ivshmem structures */
int rte_eal_ivshmem_init(void)
{
	struct rte_pci_device * dev;
	struct rte_pci_resource * res;
	int fd, ret;
	char path[PATH_MAX];

	/* initialize everything to 0 */
	memset(path, 0, sizeof(path));
	ivshmem_config = NULL;

	pagesz = getpagesize();

	RTE_LOG(DEBUG, EAL, "Searching for IVSHMEM devices...\n");

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {

		if (open_shared_config() < 0) {
			RTE_LOG(ERR, EAL, "Could not open IVSHMEM config!\n");
			return -1;
		}
	}
	else {

		TAILQ_FOREACH(dev, &pci_device_list, next) {

			if (is_ivshmem_device(dev)) {

				/* IVSHMEM memory is always on BAR2 */
				res = &dev->mem_resource[2];

				/* if we don't have a BAR2 */
				if (res->len == 0)
					continue;

				/* construct pci device path */
				rte_snprintf(path, sizeof(path), IVSHMEM_RESOURCE_PATH,
						dev->addr.domain, dev->addr.bus, dev->addr.devid,
						dev->addr.function);

				/* try to find memseg */
				fd = open(path, O_RDWR);
				if (fd < 0) {
					RTE_LOG(ERR, EAL, "Could not open %s\n", path);
					return -1;
				}

				/* check if it's a DPDK IVSHMEM device */
				ret = has_ivshmem_metadata(fd, res->len);

				/* is a DPDK device */
				if (ret == 1) {

					/* config file creation is deferred until the first
					 * DPDK device is found. then, it has to be created
					 * only once. */
					if (ivshmem_config == NULL &&
							create_shared_config() < 0) {
						RTE_LOG(ERR, EAL, "Could not create IVSHMEM config!\n");
						close(fd);
						return -1;
					}

					if (read_metadata(path, sizeof(path), fd, res->len) < 0) {
						RTE_LOG(ERR, EAL, "Could not read metadata from"
								" device %02x:%02x.%x!\n", dev->addr.bus,
								dev->addr.devid, dev->addr.function);
						close(fd);
						return -1;
					}

					if (ivshmem_config->pci_devs_idx == RTE_LIBRTE_IVSHMEM_MAX_PCI_DEVS) {
						RTE_LOG(WARNING, EAL,
								"IVSHMEM PCI device limit exceeded. Increase "
								"CONFIG_RTE_LIBRTE_IVSHMEM_MAX_PCI_DEVS in "
								"your config file.\n");
						close(fd);
						break;
					}

					RTE_LOG(INFO, EAL, "Found IVSHMEM device %02x:%02x.%x\n",
							dev->addr.bus, dev->addr.devid, dev->addr.function);

					ivshmem_config->pci_devs[ivshmem_config->pci_devs_idx].ioremap_addr = res->phys_addr;
					rte_snprintf(ivshmem_config->pci_devs[ivshmem_config->pci_devs_idx].path,
							sizeof(ivshmem_config->pci_devs[ivshmem_config->pci_devs_idx].path),
							"%s", path);

					ivshmem_config->pci_devs_idx++;
				}
				/* failed to read */
				else if (ret < 0) {
					RTE_LOG(ERR, EAL, "Could not read IVSHMEM device: %s\n",
							strerror(errno));
					close(fd);
					return -1;
				}
				/* not a DPDK device */
				else
					RTE_LOG(DEBUG, EAL, "Skipping non-DPDK IVSHMEM device\n");

				/* close the BAR fd */
				close(fd);
			}
		}
	}

	/* ivshmem_config is not NULL only if config was created and/or mapped */
	if (ivshmem_config) {
		if (map_all_segments() < 0) {
			RTE_LOG(ERR, EAL, "Mapping IVSHMEM segments failed!\n");
			return -1;
		}
	}
	else
		RTE_LOG(DEBUG, EAL, "No IVSHMEM configuration found!\n");

	return 0;
}

#endif /* RTE_LIBRTE_IVSHMEM */