X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eal%2Flinuxapp%2Feal%2Feal_ivshmem.c;h=eea0314b5fad14274eee1ef7adb9ce287a5513b9;hb=a3f34a98b7217f4ff2a8636096984f566a4e7cab;hp=b130e668b9125db4b16514fc2e55b907690b2180;hpb=e8ed6c78177fbbafa326d9efc590a51705848200;p=dpdk.git

diff --git a/lib/librte_eal/linuxapp/eal/eal_ivshmem.c b/lib/librte_eal/linuxapp/eal/eal_ivshmem.c
index b130e668b9..eea0314b5f 100644
--- a/lib/librte_eal/linuxapp/eal/eal_ivshmem.c
+++ b/lib/librte_eal/linuxapp/eal/eal_ivshmem.c
@@ -50,9 +50,9 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
-#include 
 #include "eal_internal_cfg.h"
 #include "eal_private.h"
@@ -61,7 +61,6 @@
 #define PCI_DEVICE_ID_IVSHMEM 0x1110
 #define IVSHMEM_MAGIC 0x0BADC0DE
-#define IVSHMEM_METADATA_SIZE 0x1000
 #define IVSHMEM_RESOURCE_PATH "/sys/bus/pci/devices/%04x:%02x:%02x.%x/resource2"
 #define IVSHMEM_CONFIG_PATH "/var/run/.%s_ivshmem_config"
@@ -101,7 +100,7 @@ static int memseg_idx;
 static int pagesz;
 /* Tailq heads to add rings to */
-TAILQ_HEAD(rte_ring_list, rte_ring);
+TAILQ_HEAD(rte_ring_list, rte_tailq_entry);
 /*
  * Utility functions
@@ -110,8 +109,8 @@ TAILQ_HEAD(rte_ring_list, rte_ring);
 static int
 is_ivshmem_device(struct rte_pci_device * dev)
 {
-	return (dev->id.vendor_id == PCI_VENDOR_ID_IVSHMEM
-			&& dev->id.device_id == PCI_DEVICE_ID_IVSHMEM);
+	return dev->id.vendor_id == PCI_VENDOR_ID_IVSHMEM
+			&& dev->id.device_id == PCI_DEVICE_ID_IVSHMEM;
 }
 
 static void *
@@ -255,17 +254,14 @@ adjacent(const struct rte_memzone * mz1, const struct rte_memzone * mz2)
 static int
 has_adjacent_segments(struct ivshmem_segment * ms, int len)
 {
-	int i, j, a;
+	int i, j;
 
 	for (i = 0; i < len; i++)
 		for (j = i + 1; j < len; j++) {
 
-			a = adjacent(&ms[i].entry.mz, &ms[j].entry.mz);
-
-			/* check if segments are adjacent virtually and/or physically but
-			 * not ioremap (since that would indicate that they are from
-			 * different PCI devices and thus don't need to be concatenated.
+			/* we're only interested in fully adjacent segments; partially
+			 * adjacent segments can coexist.
 			 */
-			if ((a & (VIRT|PHYS)) > 0 && (a & IOREMAP) == 0)
+			if (adjacent(&ms[i].entry.mz, &ms[j].entry.mz) == FULL)
 				return 1;
 		}
 	return 0;
 }
@@ -364,7 +360,7 @@ read_metadata(char * path, int path_len, int fd, uint64_t flen)
 				sizeof(struct rte_ivshmem_metadata_entry));
 
 		/* copy path */
-		rte_snprintf(ivshmem_config->segment[idx].path, path_len, "%s", path);
+		snprintf(ivshmem_config->segment[idx].path, path_len, "%s", path);
 
 		idx++;
 	}
@@ -469,7 +465,7 @@ create_shared_config(void)
 	int fd;
 
 	/* build ivshmem config file path */
-	rte_snprintf(path, sizeof(path), IVSHMEM_CONFIG_PATH,
+	snprintf(path, sizeof(path), IVSHMEM_CONFIG_PATH,
 			internal_config.hugefile_prefix);
 
 	fd = open(path, O_CREAT | O_RDWR, 0600);
@@ -520,7 +516,7 @@ open_shared_config(void)
 	int fd;
 
 	/* build ivshmem config file path */
-	rte_snprintf(path, sizeof(path), IVSHMEM_CONFIG_PATH,
+	snprintf(path, sizeof(path), IVSHMEM_CONFIG_PATH,
 			internal_config.hugefile_prefix);
 
 	fd = open(path, O_RDONLY);
@@ -726,15 +722,6 @@ map_all_segments(void)
 		 * expect memsegs to be empty */
 		memcpy(&mcfg->memseg[i], &ms,
 				sizeof(struct rte_memseg));
-		memcpy(&mcfg->free_memseg[i], &ms,
-				sizeof(struct rte_memseg));
-
-
-		/* adjust the free_memseg so that there's no free space left */
-		mcfg->free_memseg[i].ioremap_addr += mcfg->free_memseg[i].len;
-		mcfg->free_memseg[i].phys_addr += mcfg->free_memseg[i].len;
-		mcfg->free_memseg[i].addr_64 += mcfg->free_memseg[i].len;
-		mcfg->free_memseg[i].len = 0;
 
 		close(fd);
 
@@ -754,6 +741,7 @@ rte_eal_ivshmem_obj_init(void)
 	struct ivshmem_segment * seg;
 	struct rte_memzone * mz;
 	struct rte_ring * r;
+	struct rte_tailq_entry *te;
 	unsigned i, ms, idx;
 	uint64_t offset;
 
@@ -763,8 +751,8 @@ rte_eal_ivshmem_obj_init(void)
 		return 0;
 
 	/* check that we have an initialised ring tail queue */
-	if ((ring_list =
-			RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_RING, rte_ring_list)) == NULL) {
+	ring_list = RTE_TAILQ_LOOKUP(RTE_TAILQ_RING_NAME, rte_ring_list);
+	if (ring_list == NULL) {
 		RTE_LOG(ERR, EAL, "No rte_ring tailq found!\n");
 		return -1;
 	}
@@ -777,12 +765,12 @@ rte_eal_ivshmem_obj_init(void)
 		seg = &ivshmem_config->segment[i];
 
 		/* add memzone */
-		if (mcfg->memzone_idx == RTE_MAX_MEMZONE) {
+		if (mcfg->memzone_cnt == RTE_MAX_MEMZONE) {
 			RTE_LOG(ERR, EAL, "No more memory zones available!\n");
 			return -1;
 		}
 
-		idx = mcfg->memzone_idx;
+		idx = mcfg->memzone_cnt;
 
 		RTE_LOG(DEBUG, EAL, "Found memzone: '%s' at %p (len 0x%" PRIx64 ")\n",
 				seg->entry.mz.name, seg->entry.mz.addr, seg->entry.mz.len);
@@ -805,11 +793,13 @@ rte_eal_ivshmem_obj_init(void)
 			}
 		}
 
-		mcfg->memzone_idx++;
+		mcfg->memzone_cnt++;
 	}
 
+	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
 	/* find rings */
-	for (i = 0; i < mcfg->memzone_idx; i++) {
+	for (i = 0; i < mcfg->memzone_cnt; i++) {
 		mz = &mcfg->memzone[i];
 
 		/* check if memzone has a ring prefix */
@@ -819,10 +809,19 @@ rte_eal_ivshmem_obj_init(void)
 
 		r = (struct rte_ring*) (mz->addr_64);
 
-		TAILQ_INSERT_TAIL(ring_list, r, next);
+		te = rte_zmalloc("RING_TAILQ_ENTRY", sizeof(*te), 0);
+		if (te == NULL) {
+			RTE_LOG(ERR, EAL, "Cannot allocate ring tailq entry!\n");
+			return -1;
+		}
+
+		te->data = (void *) r;
+
+		TAILQ_INSERT_TAIL(ring_list, te, next);
 
 		RTE_LOG(DEBUG, EAL, "Found ring: '%s' at %p\n", r->name, mz->addr);
 	}
+	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
 
 #ifdef RTE_LIBRTE_IVSHMEM_DEBUG
 	rte_memzone_dump(stdout);
@@ -869,7 +868,7 @@ int rte_eal_ivshmem_init(void)
 			continue;
 
 		/* construct pci device path */
-		rte_snprintf(path, sizeof(path), IVSHMEM_RESOURCE_PATH,
+		snprintf(path, sizeof(path), IVSHMEM_RESOURCE_PATH,
 				dev->addr.domain,
dev->addr.bus, dev->addr.devid, dev->addr.function); @@ -916,7 +915,7 @@ int rte_eal_ivshmem_init(void) dev->addr.bus, dev->addr.devid, dev->addr.function); ivshmem_config->pci_devs[ivshmem_config->pci_devs_idx].ioremap_addr = res->phys_addr; - rte_snprintf(ivshmem_config->pci_devs[ivshmem_config->pci_devs_idx].path, + snprintf(ivshmem_config->pci_devs[ivshmem_config->pci_devs_idx].path, sizeof(ivshmem_config->pci_devs[ivshmem_config->pci_devs_idx].path), "%s", path);
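
Note on the ring-registration change in rte_eal_ivshmem_obj_init() above: rings discovered in IVSHMEM memzones are no longer linked into the rte_ring tailq directly. Each ring is wrapped in a generic rte_tailq_entry, the list head is looked up by name with RTE_TAILQ_LOOKUP() instead of by index, and the insertion happens under RTE_EAL_TAILQ_RWLOCK. The sketch below illustrates that pattern using only the calls visible in the hunks above; the helper name register_ivshmem_ring and the include list are assumptions for illustration, not part of the patch.

#include <sys/queue.h>

#include <rte_ring.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_tailq.h>
#include <rte_eal_memconfig.h>

/* Same head declaration as in eal_ivshmem.c: the list now holds
 * generic tailq entries instead of rte_ring structures. */
TAILQ_HEAD(rte_ring_list, rte_tailq_entry);

/* Hypothetical helper (not in the patch) showing the registration
 * scheme: the ring itself stays in the shared memzone, while a small
 * locally allocated rte_tailq_entry carries the list linkage. */
static int
register_ivshmem_ring(struct rte_ring *r)
{
	struct rte_ring_list *ring_list;
	struct rte_tailq_entry *te;

	/* look the ring tailq up by name instead of by a fixed index */
	ring_list = RTE_TAILQ_LOOKUP(RTE_TAILQ_RING_NAME, rte_ring_list);
	if (ring_list == NULL) {
		RTE_LOG(ERR, EAL, "No rte_ring tailq found!\n");
		return -1;
	}

	te = rte_zmalloc("RING_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate ring tailq entry!\n");
		return -1;
	}
	te->data = (void *) r;

	/* the shared tailq is protected by the EAL tailq rwlock */
	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_INSERT_TAIL(ring_list, te, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return 0;
}

Wrapping the object in rte_tailq_entry (with te->data pointing at the ring) keeps the tailq generic, so the rte_ring structure itself no longer needs to embed the list linkage.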