/*-
* BSD LICENSE
- *
+ *
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- *
+ *
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ring.h>
-#include <rte_mempool.h>
+#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_ivshmem.h>
-#include <rte_tailq_elem.h>
#include "eal_internal_cfg.h"
#include "eal_private.h"
#define PCI_DEVICE_ID_IVSHMEM 0x1110
#define IVSHMEM_MAGIC 0x0BADC0DE
-#define IVSHMEM_METADATA_SIZE 0x1000
#define IVSHMEM_RESOURCE_PATH "/sys/bus/pci/devices/%04x:%02x:%02x.%x/resource2"
#define IVSHMEM_CONFIG_PATH "/var/run/.%s_ivshmem_config"
static int pagesz;
/* Tailq heads to add rings to */
-TAILQ_HEAD(rte_ring_list, rte_ring);
+TAILQ_HEAD(rte_ring_list, rte_tailq_entry);
/*
* Utility functions
static int
is_ivshmem_device(struct rte_pci_device * dev)
{
- return (dev->id.vendor_id == PCI_VENDOR_ID_IVSHMEM
- && dev->id.device_id == PCI_DEVICE_ID_IVSHMEM);
+ return dev->id.vendor_id == PCI_VENDOR_ID_IVSHMEM
+ && dev->id.device_id == PCI_DEVICE_ID_IVSHMEM;
}
static void *
static int
has_adjacent_segments(struct ivshmem_segment * ms, int len)
{
- int i, j, a;
+ int i, j;
for (i = 0; i < len; i++)
for (j = i + 1; j < len; j++) {
- a = adjacent(&ms[i].entry.mz, &ms[j].entry.mz);
-
- /* check if segments are adjacent virtually and/or physically but
- * not ioremap (since that would indicate that they are from
- * different PCI devices and thus don't need to be concatenated.
+ /* we're only interested in fully adjacent segments; partially
+ * adjacent segments can coexist.
*/
- if ((a & (VIRT|PHYS)) > 0 && (a & IOREMAP) == 0)
+ if (adjacent(&ms[i].entry.mz, &ms[j].entry.mz) == FULL)
return 1;
}
return 0;
sizeof(struct rte_ivshmem_metadata_entry));
/* copy path */
- rte_snprintf(ivshmem_config->segment[idx].path, path_len, "%s", path);
+ snprintf(ivshmem_config->segment[idx].path, path_len, "%s", path);
idx++;
}
int fd;
/* build ivshmem config file path */
- rte_snprintf(path, sizeof(path), IVSHMEM_CONFIG_PATH,
+ snprintf(path, sizeof(path), IVSHMEM_CONFIG_PATH,
internal_config.hugefile_prefix);
- fd = open(path, O_CREAT | O_RDWR);
+ fd = open(path, O_CREAT | O_RDWR, 0600);
if (fd < 0) {
RTE_LOG(ERR, EAL, "Could not open %s: %s\n", path, strerror(errno));
return -1;
}
- ftruncate(fd, sizeof(struct ivshmem_shared_config));
+	if (ftruncate(fd, sizeof(struct ivshmem_shared_config)) < 0) {
+		RTE_LOG(ERR, EAL, "ftruncate failed: %s\n", strerror(errno));
+		close(fd);
+		return -1;
+	}
ivshmem_config = mmap(NULL, sizeof(struct ivshmem_shared_config),
PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
int fd;
/* build ivshmem config file path */
- rte_snprintf(path, sizeof(path), IVSHMEM_CONFIG_PATH,
+ snprintf(path, sizeof(path), IVSHMEM_CONFIG_PATH,
internal_config.hugefile_prefix);
fd = open(path, O_RDONLY);
map_all_segments(void)
{
struct ivshmem_segment ms_tbl[RTE_MAX_MEMSEG];
- struct ivshmem_pci_device * pci_dev;
+ struct ivshmem_pci_device * pci_dev;
struct rte_mem_config * mcfg;
struct ivshmem_segment * seg;
int fd, fd_zero;
* expect memsegs to be empty */
memcpy(&mcfg->memseg[i], &ms,
sizeof(struct rte_memseg));
- memcpy(&mcfg->free_memseg[i], &ms,
- sizeof(struct rte_memseg));
-
-
- /* adjust the free_memseg so that there's no free space left */
- mcfg->free_memseg[i].ioremap_addr += mcfg->free_memseg[i].len;
- mcfg->free_memseg[i].phys_addr += mcfg->free_memseg[i].len;
- mcfg->free_memseg[i].addr_64 += mcfg->free_memseg[i].len;
- mcfg->free_memseg[i].len = 0;
close(fd);
struct ivshmem_segment * seg;
struct rte_memzone * mz;
struct rte_ring * r;
+ struct rte_tailq_entry *te;
unsigned i, ms, idx;
uint64_t offset;
return 0;
/* check that we have an initialised ring tail queue */
- if ((ring_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_RING, rte_ring_list)) == NULL) {
+ ring_list = RTE_TAILQ_LOOKUP(RTE_TAILQ_RING_NAME, rte_ring_list);
+ if (ring_list == NULL) {
RTE_LOG(ERR, EAL, "No rte_ring tailq found!\n");
return -1;
}
seg = &ivshmem_config->segment[i];
/* add memzone */
- if (mcfg->memzone_idx == RTE_MAX_MEMZONE) {
+ if (mcfg->memzone_cnt == RTE_MAX_MEMZONE) {
RTE_LOG(ERR, EAL, "No more memory zones available!\n");
return -1;
}
- idx = mcfg->memzone_idx;
+ idx = mcfg->memzone_cnt;
RTE_LOG(DEBUG, EAL, "Found memzone: '%s' at %p (len 0x%" PRIx64 ")\n",
seg->entry.mz.name, seg->entry.mz.addr, seg->entry.mz.len);
}
}
- mcfg->memzone_idx++;
+ mcfg->memzone_cnt++;
}
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
/* find rings */
- for (i = 0; i < mcfg->memzone_idx; i++) {
+ for (i = 0; i < mcfg->memzone_cnt; i++) {
mz = &mcfg->memzone[i];
/* check if memzone has a ring prefix */
r = (struct rte_ring*) (mz->addr_64);
- TAILQ_INSERT_TAIL(ring_list, r, next);
+		te = rte_zmalloc("RING_TAILQ_ENTRY", sizeof(*te), 0);
+		if (te == NULL) {
+			RTE_LOG(ERR, EAL, "Cannot allocate ring tailq entry!\n");
+			rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+			return -1;
+		}
+
+ te->data = (void *) r;
+
+ TAILQ_INSERT_TAIL(ring_list, te, next);
RTE_LOG(DEBUG, EAL, "Found ring: '%s' at %p\n", r->name, mz->addr);
}
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
- rte_memzone_dump();
- rte_ring_list_dump();
+ rte_memzone_dump(stdout);
+ rte_ring_list_dump(stdout);
#endif
return 0;
/* initialize everything to 0 */
memset(path, 0, sizeof(path));
ivshmem_config = NULL;
-
+
pagesz = getpagesize();
RTE_LOG(DEBUG, EAL, "Searching for IVSHMEM devices...\n");
continue;
/* construct pci device path */
- rte_snprintf(path, sizeof(path), IVSHMEM_RESOURCE_PATH,
+ snprintf(path, sizeof(path), IVSHMEM_RESOURCE_PATH,
dev->addr.domain, dev->addr.bus, dev->addr.devid,
dev->addr.function);
dev->addr.bus, dev->addr.devid, dev->addr.function);
ivshmem_config->pci_devs[ivshmem_config->pci_devs_idx].ioremap_addr = res->phys_addr;
- rte_snprintf(ivshmem_config->pci_devs[ivshmem_config->pci_devs_idx].path,
+ snprintf(ivshmem_config->pci_devs[ivshmem_config->pci_devs_idx].path,
sizeof(ivshmem_config->pci_devs[ivshmem_config->pci_devs_idx].path),
- path);
+ "%s", path);
ivshmem_config->pci_devs_idx++;
}