#include <string.h>
#include <stdint.h>
#include <stdarg.h>
+#include <unistd.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/queue.h>
}
/*
- * Depending on memory configuration, objects addresses are spreaded
+ * Depending on memory configuration, objects addresses are spread
* between channels and ranks in RAM: the pool allocator will add
 * padding between objects. This function returns the new size of the
* object.
/* process new object size */
new_obj_size = (obj_size + CACHE_LINE_MASK) / CACHE_LINE_SIZE;
- while (get_gcd(new_obj_size, nrank * nchan) != 1 ||
- get_gcd(nchan, new_obj_size) != 1)
+ while (get_gcd(new_obj_size, nrank * nchan) != 1)
new_obj_size++;
return new_obj_size * CACHE_LINE_SIZE;
}
/*
* increase trailer to add padding between objects in order to
- * spread them accross memory channels/ranks
+ * spread them across memory channels/ranks
*/
if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
unsigned new_size;
sz->trailer_size = new_size - sz->header_size - sz->elt_size;
}
+ if (! rte_eal_has_hugepages()) {
+ /*
+ * compute trailer size so that pool elements fit exactly in
+ * a standard page
+ */
+ int page_size = getpagesize();
+ int new_size = page_size - sz->header_size - sz->elt_size;
+ if (new_size < 0 || (unsigned int)new_size < sz->trailer_size) {
+ printf("When hugepages are disabled, pool objects "
+ "can't exceed PAGE_SIZE: %d + %d + %d > %d\n",
+ sz->header_size, sz->elt_size, sz->trailer_size,
+ page_size);
+ return 0;
+ }
+ sz->trailer_size = new_size;
+ }
+
/* this is the size of an object, including header and trailer */
sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size;
size_t mempool_size;
int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
int rg_flags = 0;
- void *obj;
+ void *obj;
struct rte_mempool_objsz objsz;
+ void *startaddr;
+ int page_size = getpagesize();
/* compilation-time checks */
RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
rg_flags |= RING_F_SC_DEQ;
/* calculate mempool object sizes. */
- rte_mempool_calc_obj_size(elt_size, flags, &objsz);
+ if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
rte_rwlock_write_lock(RTE_EAL_MEMPOOL_RWLOCK);
private_data_size = (private_data_size +
CACHE_LINE_MASK) & (~CACHE_LINE_MASK);
+ if (! rte_eal_has_hugepages()) {
+ /*
+ * expand private data size to a whole page, so that the
+ * first pool element will start on a new standard page
+ */
+ int head = sizeof(struct rte_mempool);
+ int new_size = (private_data_size + head) % page_size;
+ if (new_size) {
+ private_data_size += page_size - new_size;
+ }
+ }
+
/*
* If user provided an external memory buffer, then use it to
* store mempool objects. Otherwise reserve memzone big enough to
if (vaddr == NULL)
mempool_size += (size_t)objsz.total_size * n;
+ if (! rte_eal_has_hugepages()) {
+ /*
+ * we want the memory pool to start on a page boundary,
+ * because pool elements crossing page boundaries would
+ * result in discontiguous physical addresses
+ */
+ mempool_size += page_size;
+ }
+
rte_snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name);
mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);
if (mz == NULL)
goto exit;
+ if (rte_eal_has_hugepages()) {
+ startaddr = (void*)mz->addr;
+ } else {
+ /* align memory pool start address on a page boundary */
+ unsigned long addr = (unsigned long)mz->addr;
+ if (addr & (page_size - 1)) {
+ addr += page_size;
+ addr &= ~(page_size - 1);
+ }
+ startaddr = (void*)addr;
+ }
+
/* init the mempool structure */
- mp = mz->addr;
+ mp = startaddr;
memset(mp, 0, sizeof(*mp));
rte_snprintf(mp->name, sizeof(mp->name), "%s", name);
mp->phys_addr = mz->phys_addr;
/* print per-lcore cache occupancy and return the total number of
 * objects currently held in caches (0 when caching is compiled out) */
static unsigned
rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp)
{
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
	unsigned i;
	unsigned total = 0;

	fprintf(f, " cache infos:\n");
	fprintf(f, " cache_size=%"PRIu32"\n", mp->cache_size);

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		unsigned len = mp->local_cache[i].len;

		fprintf(f, " cache_count[%u]=%u\n", i, len);
		total += len;
	}

	fprintf(f, " total_cache_count=%u\n", total);
	return total;
#else
	/* caches disabled at build time: nothing cached, nothing to count */
	RTE_SET_USED(mp);
	fprintf(f, " cache disabled\n");
	return 0;
#endif
}
/* dump the status of the mempool to the given file */
void
-rte_mempool_dump(const struct rte_mempool *mp)
+rte_mempool_dump(FILE *f, const struct rte_mempool *mp)
{
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
struct rte_mempool_debug_stats sum;
unsigned common_count;
unsigned cache_count;
- printf("mempool <%s>@%p\n", mp->name, mp);
- printf(" flags=%x\n", mp->flags);
- printf(" ring=<%s>@%p\n", mp->ring->name, mp->ring);
- printf(" phys_addr=0x%" PRIx64 "\n", mp->phys_addr);
- printf(" size=%"PRIu32"\n", mp->size);
- printf(" header_size=%"PRIu32"\n", mp->header_size);
- printf(" elt_size=%"PRIu32"\n", mp->elt_size);
- printf(" trailer_size=%"PRIu32"\n", mp->trailer_size);
- printf(" total_obj_size=%"PRIu32"\n",
+ fprintf(f, "mempool <%s>@%p\n", mp->name, mp);
+ fprintf(f, " flags=%x\n", mp->flags);
+ fprintf(f, " ring=<%s>@%p\n", mp->ring->name, mp->ring);
+ fprintf(f, " phys_addr=0x%" PRIx64 "\n", mp->phys_addr);
+ fprintf(f, " size=%"PRIu32"\n", mp->size);
+ fprintf(f, " header_size=%"PRIu32"\n", mp->header_size);
+ fprintf(f, " elt_size=%"PRIu32"\n", mp->elt_size);
+ fprintf(f, " trailer_size=%"PRIu32"\n", mp->trailer_size);
+ fprintf(f, " total_obj_size=%"PRIu32"\n",
mp->header_size + mp->elt_size + mp->trailer_size);
- printf(" private_data_size=%"PRIu32"\n", mp->private_data_size);
- printf(" pg_num=%"PRIu32"\n", mp->pg_num);
- printf(" pg_shift=%"PRIu32"\n", mp->pg_shift);
- printf(" pg_mask=%#tx\n", mp->pg_mask);
- printf(" elt_va_start=%#tx\n", mp->elt_va_start);
- printf(" elt_va_end=%#tx\n", mp->elt_va_end);
- printf(" elt_pa[0]=0x%" PRIx64 "\n", mp->elt_pa[0]);
+ fprintf(f, " private_data_size=%"PRIu32"\n", mp->private_data_size);
+ fprintf(f, " pg_num=%"PRIu32"\n", mp->pg_num);
+ fprintf(f, " pg_shift=%"PRIu32"\n", mp->pg_shift);
+ fprintf(f, " pg_mask=%#tx\n", mp->pg_mask);
+ fprintf(f, " elt_va_start=%#tx\n", mp->elt_va_start);
+ fprintf(f, " elt_va_end=%#tx\n", mp->elt_va_end);
+ fprintf(f, " elt_pa[0]=0x%" PRIx64 "\n", mp->elt_pa[0]);
if (mp->size != 0)
- printf(" avg bytes/object=%#Lf\n",
+ fprintf(f, " avg bytes/object=%#Lf\n",
(long double)(mp->elt_va_end - mp->elt_va_start) /
mp->size);
- cache_count = rte_mempool_dump_cache(mp);
+ cache_count = rte_mempool_dump_cache(f, mp);
common_count = rte_ring_count(mp->ring);
if ((cache_count + common_count) > mp->size)
common_count = mp->size - cache_count;
- printf(" common_pool_count=%u\n", common_count);
+ fprintf(f, " common_pool_count=%u\n", common_count);
/* sum and dump statistics */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk;
sum.get_fail_objs += mp->stats[lcore_id].get_fail_objs;
}
- printf(" stats:\n");
- printf(" put_bulk=%"PRIu64"\n", sum.put_bulk);
- printf(" put_objs=%"PRIu64"\n", sum.put_objs);
- printf(" get_success_bulk=%"PRIu64"\n", sum.get_success_bulk);
- printf(" get_success_objs=%"PRIu64"\n", sum.get_success_objs);
- printf(" get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk);
- printf(" get_fail_objs=%"PRIu64"\n", sum.get_fail_objs);
+ fprintf(f, " stats:\n");
+ fprintf(f, " put_bulk=%"PRIu64"\n", sum.put_bulk);
+ fprintf(f, " put_objs=%"PRIu64"\n", sum.put_objs);
+ fprintf(f, " get_success_bulk=%"PRIu64"\n", sum.get_success_bulk);
+ fprintf(f, " get_success_objs=%"PRIu64"\n", sum.get_success_objs);
+ fprintf(f, " get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk);
+ fprintf(f, " get_fail_objs=%"PRIu64"\n", sum.get_fail_objs);
#else
- printf(" no statistics available\n");
+ fprintf(f, " no statistics available\n");
#endif
rte_mempool_audit(mp);
/* dump the status of all mempools to the given file */
void
-rte_mempool_list_dump(void)
+rte_mempool_list_dump(FILE *f)
{
const struct rte_mempool *mp = NULL;
struct rte_mempool_list *mempool_list;
rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK);
TAILQ_FOREACH(mp, mempool_list, next) {
- rte_mempool_dump(mp);
+ rte_mempool_dump(f, mp);
}
rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK);
return mp;
}
+
+void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *),
+ void *arg)
+{
+ struct rte_mempool *mp = NULL;
+ struct rte_mempool_list *mempool_list;
+
+ if ((mempool_list =
+ RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL, rte_mempool_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return;
+ }
+
+ rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK);
+
+ TAILQ_FOREACH(mp, mempool_list, next) {
+ (*func)(mp, arg);
+ }
+
+ rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK);
+}