return 0;
}
+struct mlx4_check_mempool_data {
+ int ret;
+ char *start;
+ char *end;
+};
+
+/* Called by mlx4_check_mempool() when iterating the memory chunks. */
+static void mlx4_check_mempool_cb(struct rte_mempool *mp,
+ void *opaque, struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx)
+{
+ struct mlx4_check_mempool_data *data = opaque;
+
+ (void)mp;
+ (void)mem_idx;
+
+ /* The check already failed, skip the remaining chunks. */
+ if (data->ret != 0)
+ return;
+ /* It is the first chunk. */
+ if (data->start == NULL && data->end == NULL) {
+ data->start = memhdr->addr;
+ data->end = data->start + memhdr->len;
+ return;
+ }
+ if (data->end == memhdr->addr) {
+ data->end += memhdr->len;
+ return;
+ }
+ if (data->start == (char *)memhdr->addr + memhdr->len) {
+ data->start -= memhdr->len;
+ return;
+ }
+ /* Error, mempool is not virtually contiguous. */
+ data->ret = -1;
+}
+
+/**
+ * Check if a mempool can be used: it must be virtually contiguous.
+ *
+ * @param[in] mp
+ * Pointer to memory pool.
+ * @param[out] start
+ * Pointer to the start address of the mempool virtual memory area.
+ * @param[out] end
+ * Pointer to the end address of the mempool virtual memory area.
+ *
+ * @return
+ * 0 on success (mempool is virtually contiguous), -1 on error.
+ */
+static int mlx4_check_mempool(struct rte_mempool *mp, uintptr_t *start,
+ uintptr_t *end)
+{
+ struct mlx4_check_mempool_data data;
+
+ memset(&data, 0, sizeof(data));
+ rte_mempool_mem_iter(mp, mlx4_check_mempool_cb, &data);
+ *start = (uintptr_t)data.start;
+ *end = (uintptr_t)data.end;
+
+ return data.ret;
+}
+
/* For best performance, this function should not be inlined. */
-static struct ibv_mr *mlx4_mp2mr(struct ibv_pd *, const struct rte_mempool *)
+static struct ibv_mr *mlx4_mp2mr(struct ibv_pd *, struct rte_mempool *)
__attribute__((noinline));
/**
* Memory region pointer, NULL in case of error.
*/
static struct ibv_mr *
-mlx4_mp2mr(struct ibv_pd *pd, const struct rte_mempool *mp)
+mlx4_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
{
const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- uintptr_t start = (uintptr_t)STAILQ_FIRST(&mp->mem_list)->addr;
- uintptr_t end = start + STAILQ_FIRST(&mp->mem_list)->len;
+ uintptr_t start;
+ uintptr_t end;
unsigned int i;
+ if (mlx4_check_mempool(mp, &start, &end) != 0) {
+ ERROR("mempool %p: not virtually contiguous",
+ (void *)mp);
+ return NULL;
+ }
+
DEBUG("mempool %p area start=%p end=%p size=%zu",
- (const void *)mp, (void *)start, (void *)end,
+ (void *)mp, (void *)start, (void *)end,
(size_t)(end - start));
/* Round start and end to page boundary if found in memory segments. */
for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
end = RTE_ALIGN_CEIL(end, align);
}
DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
- (const void *)mp, (void *)start, (void *)end,
+ (void *)mp, (void *)start, (void *)end,
(size_t)(end - start));
return ibv_reg_mr(pd,
(void *)start,
* mr->lkey on success, (uint32_t)-1 on failure.
*/
static uint32_t
-txq_mp2mr(struct txq *txq, const struct rte_mempool *mp)
+txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
{
unsigned int i;
struct ibv_mr *mr;
}
/* Add a new entry, register MR first. */
DEBUG("%p: discovered new memory pool \"%s\" (%p)",
- (void *)txq, mp->name, (const void *)mp);
+ (void *)txq, mp->name, (void *)mp);
mr = mlx4_mp2mr(txq->priv->pd, mp);
if (unlikely(mr == NULL)) {
DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
txq->mp2mr[i].mr = mr;
txq->mp2mr[i].lkey = mr->lkey;
DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
- (void *)txq, mp->name, (const void *)mp, txq->mp2mr[i].lkey);
+ (void *)txq, mp->name, (void *)mp, txq->mp2mr[i].lkey);
return txq->mp2mr[i].lkey;
}
return 0;
}
+struct mlx5_check_mempool_data {
+ int ret;
+ char *start;
+ char *end;
+};
+
+/* Called by mlx5_check_mempool() when iterating the memory chunks. */
+static void mlx5_check_mempool_cb(struct rte_mempool *mp,
+ void *opaque, struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx)
+{
+ struct mlx5_check_mempool_data *data = opaque;
+
+ (void)mp;
+ (void)mem_idx;
+
+ /* The check already failed, skip the remaining chunks. */
+ if (data->ret != 0)
+ return;
+ /* It is the first chunk. */
+ if (data->start == NULL && data->end == NULL) {
+ data->start = memhdr->addr;
+ data->end = data->start + memhdr->len;
+ return;
+ }
+ if (data->end == memhdr->addr) {
+ data->end += memhdr->len;
+ return;
+ }
+ if (data->start == (char *)memhdr->addr + memhdr->len) {
+ data->start -= memhdr->len;
+ return;
+ }
+ /* Error, mempool is not virtually contiguous. */
+ data->ret = -1;
+}
+
+/**
+ * Check if a mempool can be used: it must be virtually contiguous.
+ *
+ * @param[in] mp
+ * Pointer to memory pool.
+ * @param[out] start
+ * Pointer to the start address of the mempool virtual memory area.
+ * @param[out] end
+ * Pointer to the end address of the mempool virtual memory area.
+ *
+ * @return
+ * 0 on success (mempool is virtually contiguous), -1 on error.
+ */
+static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
+ uintptr_t *end)
+{
+ struct mlx5_check_mempool_data data;
+
+ memset(&data, 0, sizeof(data));
+ rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data);
+ *start = (uintptr_t)data.start;
+ *end = (uintptr_t)data.end;
+
+ return data.ret;
+}
+
/* For best performance, this function should not be inlined. */
-struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, const struct rte_mempool *)
+struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, struct rte_mempool *)
__attribute__((noinline));
/**
* Memory region pointer, NULL in case of error.
*/
struct ibv_mr *
-mlx5_mp2mr(struct ibv_pd *pd, const struct rte_mempool *mp)
+mlx5_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
{
const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- uintptr_t start = (uintptr_t)STAILQ_FIRST(&mp->mem_list)->addr;
- uintptr_t end = start + STAILQ_FIRST(&mp->mem_list)->len;
+ uintptr_t start;
+ uintptr_t end;
unsigned int i;
+ if (mlx5_check_mempool(mp, &start, &end) != 0) {
+ ERROR("mempool %p: not virtually contiguous",
+ (void *)mp);
+ return NULL;
+ }
+
DEBUG("mempool %p area start=%p end=%p size=%zu",
- (const void *)mp, (void *)start, (void *)end,
+ (void *)mp, (void *)start, (void *)end,
(size_t)(end - start));
/* Round start and end to page boundary if found in memory segments. */
for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
end = RTE_ALIGN_CEIL(end, align);
}
DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
- (const void *)mp, (void *)start, (void *)end,
+ (void *)mp, (void *)start, (void *)end,
(size_t)(end - start));
return ibv_reg_mr(pd,
(void *)start,
* mr->lkey on success, (uint32_t)-1 on failure.
*/
static uint32_t
-txq_mp2mr(struct txq *txq, const struct rte_mempool *mp)
+txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
{
unsigned int i;
struct ibv_mr *mr;
}
/* Add a new entry, register MR first. */
DEBUG("%p: discovered new memory pool \"%s\" (%p)",
- (void *)txq, mp->name, (const void *)mp);
+ (void *)txq, mp->name, (void *)mp);
mr = mlx5_mp2mr(txq->priv->pd, mp);
if (unlikely(mr == NULL)) {
DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
txq->mp2mr[i].mr = mr;
txq->mp2mr[i].lkey = mr->lkey;
DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
- (void *)txq, mp->name, (const void *)mp, txq->mp2mr[i].lkey);
+ (void *)txq, mp->name, (void *)mp, txq->mp2mr[i].lkey);
return txq->mp2mr[i].lkey;
}
/* mlx5_rxtx.c */
-struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, const struct rte_mempool *);
+struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, struct rte_mempool *);
void txq_mp2mr_iter(struct rte_mempool *, void *);
uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_rx_burst_sp(void *, struct rte_mbuf **, uint16_t);
char intf_name[RTE_KNI_NAMESIZE];
char mz_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
+ const struct rte_mempool *mp;
struct rte_kni_memzone_slot *slot = NULL;
if (!pktmbuf_pool || !conf || !conf->name[0])
/* MBUF mempool */
- snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_OBJ_NAME,
+ snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT,
pktmbuf_pool->name);
mz = rte_memzone_lookup(mz_name);
KNI_MEM_CHECK(mz == NULL);
- dev_info.mbuf_va = mz->addr;
- dev_info.mbuf_phys = mz->phys_addr;
+ mp = (struct rte_mempool *)mz->addr;
+ /* KNI currently requires the mempool to have only one memory chunk */
+ if (mp->nb_mem_chunks != 1)
+ goto kni_fail;
+
+ dev_info.mbuf_va = STAILQ_FIRST(&mp->mem_list)->addr;
+ dev_info.mbuf_phys = STAILQ_FIRST(&mp->mem_list)->phys_addr;
ctx->pktmbuf_pool = pktmbuf_pool;
ctx->group_id = conf->group_id;
ctx->slot_id = slot->id;
if (pa == NULL)
return mp;
- snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_OBJ_NAME, name);
+ snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT "_elt", name);
mz = rte_memzone_reserve(mz_name, sz, socket_id, mz_flags);
if (mz == NULL) {
free(pa);
}
/* free a memchunk allocated with rte_memzone_reserve() */
-__rte_unused static void
+static void
rte_mempool_memchunk_mz_free(__rte_unused struct rte_mempool_memhdr *memhdr,
void *opaque)
{
return cnt;
}
+/* Default function to populate the mempool: allocate memory in memzones
+ * and populate the mempool from them. Return the number of objects added,
+ * or a negative value on error.
+ */
+static int rte_mempool_populate_default(struct rte_mempool *mp)
+{
+ int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+ size_t size, total_elt_sz, align;
+ unsigned mz_id, n;
+ int ret;
+
+ /* mempool must not be populated */
+ if (mp->nb_mem_chunks != 0)
+ return -EEXIST;
+
+ align = RTE_CACHE_LINE_SIZE;
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+ for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
+ size = rte_mempool_xmem_size(n, total_elt_sz, 0);
+
+ ret = snprintf(mz_name, sizeof(mz_name),
+ RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
+ if (ret < 0 || ret >= (int)sizeof(mz_name)) {
+ ret = -ENAMETOOLONG;
+ goto fail;
+ }
+
+ mz = rte_memzone_reserve_aligned(mz_name, size,
+ mp->socket_id, mz_flags, align);
+ /* not enough memory, retry with the biggest zone we have */
+ if (mz == NULL)
+ mz = rte_memzone_reserve_aligned(mz_name, 0,
+ mp->socket_id, mz_flags, align);
+ if (mz == NULL) {
+ ret = -rte_errno;
+ goto fail;
+ }
+
+ ret = rte_mempool_populate_phys(mp, mz->addr, mz->phys_addr,
+ mz->len, rte_mempool_memchunk_mz_free,
+ (void *)(uintptr_t)mz);
+ if (ret < 0)
+ goto fail;
+ }
+
+ return mp->size;
+
+ fail:
+ rte_mempool_free_memchunks(mp);
+ return ret;
+}
+
/*
* Create the mempool over an already allocated chunk of memory.
* That external memory buffer can consist of physically disjoint pages.
struct rte_mempool_list *mempool_list;
struct rte_mempool *mp = NULL;
struct rte_tailq_entry *te = NULL;
- const struct rte_memzone *mz;
+ const struct rte_memzone *mz = NULL;
size_t mempool_size;
int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
- void *obj;
struct rte_mempool_objsz objsz;
- void *startaddr;
- int page_size = getpagesize();
int ret;
/* compilation-time checks */
private_data_size = (private_data_size +
RTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK);
- if (! rte_eal_has_hugepages()) {
- /*
- * expand private data size to a whole page, so that the
- * first pool element will start on a new standard page
- */
- int head = sizeof(struct rte_mempool);
- int new_size = (private_data_size + head) % page_size;
- if (new_size)
- private_data_size += page_size - new_size;
- }
/* try to allocate tailq entry */
te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
mempool_size = MEMPOOL_HEADER_SIZE(mp, cache_size);
mempool_size += private_data_size;
mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
- if (vaddr == NULL)
- mempool_size += (size_t)objsz.total_size * n;
-
- if (! rte_eal_has_hugepages()) {
- /*
- * we want the memory pool to start on a page boundary,
- * because pool elements crossing page boundaries would
- * result in discontiguous physical addresses
- */
- mempool_size += page_size;
- }
snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name);
if (mz == NULL)
goto exit_unlock;
- if (rte_eal_has_hugepages()) {
- startaddr = (void*)mz->addr;
- } else {
- /* align memory pool start address on a page boundary */
- unsigned long addr = (unsigned long)mz->addr;
- if (addr & (page_size - 1)) {
- addr += page_size;
- addr &= ~(page_size - 1);
- }
- startaddr = (void*)addr;
- }
-
/* init the mempool structure */
- mp = startaddr;
memset(mp, 0, sizeof(*mp));
snprintf(mp->name, sizeof(mp->name), "%s", name);
mp->phys_addr = mz->phys_addr;
mp_init(mp, mp_init_arg);
/* mempool elements allocated together with mempool */
- if (vaddr == NULL) {
- /* calculate address of the first elt for continuous mempool. */
- obj = (char *)mp + MEMPOOL_HEADER_SIZE(mp, cache_size) +
- private_data_size;
- obj = RTE_PTR_ALIGN_CEIL(obj, RTE_MEMPOOL_ALIGN);
-
- ret = rte_mempool_populate_phys(mp, obj,
- mp->phys_addr + ((char *)obj - (char *)mp),
- objsz.total_size * n, NULL, NULL);
- if (ret != (int)mp->size)
- goto exit_unlock;
- } else {
+ if (vaddr == NULL)
+ ret = rte_mempool_populate_default(mp);
+ else
ret = rte_mempool_populate_phys_tab(mp, vaddr,
paddr, pg_num, pg_shift, NULL, NULL);
- if (ret != (int)mp->size)
- goto exit_unlock;
+ if (ret < 0) {
+ rte_errno = -ret;
+ goto exit_unlock;
+ } else if (ret != (int)mp->size) {
+ rte_errno = EINVAL;
+ goto exit_unlock;
}
/* call the initializer */
rte_ring_free(mp->ring);
}
rte_free(te);
+ if (mz != NULL)
+ rte_memzone_free(mz);
return NULL;
}
/* "MP_<name>" */
#define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s"
-#ifdef RTE_LIBRTE_XEN_DOM0
-
-/* "<name>_MP_elt" */
-#define RTE_MEMPOOL_OBJ_NAME "%s_" RTE_MEMPOOL_MZ_PREFIX "elt"
-
-#else
-
-#define RTE_MEMPOOL_OBJ_NAME RTE_MEMPOOL_MZ_FORMAT
-
-#endif /* RTE_LIBRTE_XEN_DOM0 */
-
#define MEMPOOL_PG_SHIFT_MAX (sizeof(uintptr_t) * CHAR_BIT - 1)
/** Mempool over one chunk of physically continuous memory */