typedef void (*rte_mempool_obj_iter_t)(void * /*obj_iter_arg*/,
	void * /*obj_start*/,
	void * /*obj_end*/,
- 	uint32_t /*obj_index */);
+ 	uint32_t /*obj_index */,
+ 	phys_addr_t /*physaddr*/);
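An iterator callback now also receives the physical address of each object it visits, alongside the virtual bounds and index it already got. A minimal sketch of a callback matching the extended signature (count_objs() and its counter argument are illustrative, not part of the patch):

#include <rte_common.h>
#include <rte_mempool.h>

/* Illustrative callback: counts visited objects, ignores everything else. */
static void
count_objs(void *obj_iter_arg, void *obj_start, void *obj_end,
	uint32_t obj_index, phys_addr_t physaddr)
{
	unsigned int *count = obj_iter_arg;

	RTE_SET_USED(obj_start);
	RTE_SET_USED(obj_end);
	RTE_SET_USED(obj_index);
	RTE_SET_USED(physaddr);
	(*count)++;
}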
static void
-mempool_add_elem(struct rte_mempool *mp, void *obj)
+mempool_add_elem(struct rte_mempool *mp, void *obj, phys_addr_t physaddr)
{
	struct rte_mempool_objhdr *hdr;
	struct rte_mempool_objtlr *tlr __rte_unused;
	obj = (char *)obj + mp->header_size;
+ 	physaddr += mp->header_size;
	/* set mempool ptr in header */
	hdr = RTE_PTR_SUB(obj, sizeof(*hdr));
	hdr->mp = mp;
+ 	hdr->physaddr = physaddr;
	STAILQ_INSERT_TAIL(&mp->elt_list, hdr, next);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
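mempool_add_elem() now takes the physical address of the raw slot and advances both the virtual and the physical address by mp->header_size, so the header it fills in describes the object exactly as applications see it. A hedged sketch of that contract, written as if it lived inside rte_mempool.c (add_one_elem() and the zone_* names are illustrative, assuming a physically contiguous backing zone):

/*
 * Illustrative only: feed one slot of a physically contiguous zone
 * into the pool. Not part of the patch.
 */
static void
add_one_elem(struct rte_mempool *mp, void *zone_va, phys_addr_t zone_pa,
	size_t slot_off)
{
	void *slot = RTE_PTR_ADD(zone_va, slot_off);
	phys_addr_t slot_pa = zone_pa + slot_off;

	/* mempool_add_elem() shifts both addresses past the header, so the
	 * stored hdr->physaddr matches the object handed out later. */
	mempool_add_elem(mp, slot, slot_pa);
}

The declarations and loop body in the next hunk belong to the object iterator, which derives this physical address from the caller-supplied page table (paddr[]) before invoking the callback.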
	uint32_t pgn, pgf;
	uintptr_t end, start, va;
	uintptr_t pg_sz;
+ 	phys_addr_t physaddr;
	pg_sz = (uintptr_t)1 << pg_shift;
	va = (uintptr_t)vaddr;
		 * otherwise, just skip that chunk unused.
		 */
		if (k == pgn) {
+ 			physaddr = paddr[k] + (start & (pg_sz - 1));
			if (obj_iter != NULL)
				obj_iter(obj_iter_arg, (void *)start,
- 					(void *)end, i);
+ 					(void *)end, i, physaddr);
			va = end;
			j += pgf;
			i++;
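The in-page offset of the object is obtained by masking the virtual start address with pg_sz - 1 and adding it to the physical base of the selected page. A worked example with illustrative numbers, for an element that fits within a single 4 KB page (so k indexes the page containing start):

/*
 * Worked example (illustrative numbers, pg_shift = 12):
 *   pg_sz          = 1 << 12      = 0x1000
 *   start          = 0x7f3a00002740      (virtual address)
 *   start & 0xfff  = 0x740               (offset within the page)
 *   paddr[k]       = 0x1c0003000         (physical base of the page)
 *   physaddr       = 0x1c0003000 + 0x740 = 0x1c0003740
 */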
static void
mempool_obj_populate(void *arg, void *start, void *end,
- 	__rte_unused uint32_t idx)
+ 	__rte_unused uint32_t idx, phys_addr_t physaddr)
{
	struct rte_mempool *mp = arg;
- 	mempool_add_elem(mp, start);
+ 	mempool_add_elem(mp, start, physaddr);
	mp->elt_va_end = (uintptr_t)end;
}
*/
static void
mempool_lelem_iter(void *arg, __rte_unused void *start, void *end,
- 	__rte_unused uint32_t idx)
+ 	__rte_unused uint32_t idx, __rte_unused phys_addr_t physaddr)
{
	*(uintptr_t *)arg = (uintptr_t)end;
}
struct rte_mempool_objhdr {
	STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
	struct rte_mempool *mp; /**< The mempool owning the object. */
+ 	phys_addr_t physaddr; /**< Physical address of the object. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	uint64_t cookie; /**< Debug cookie. */
#endif
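Because every object now carries its physical address in the per-object header, translating a pointer no longer needs the pool-level page table when hugepages are in use, as the rte_mempool_virt2phy() hunk below shows. A usage sketch from the application side (get_obj_physaddr() is an illustrative helper, not part of the patch):

#include <rte_mempool.h>

/* Illustrative helper: fetch an object and read back its physical address. */
static int
get_obj_physaddr(struct rte_mempool *mp, phys_addr_t *pa)
{
	void *obj;

	if (rte_mempool_get(mp, &obj) < 0)
		return -1;

	/* constant-time header lookup when hugepages are enabled */
	*pa = rte_mempool_virt2phy(mp, obj);
	/* the address can now be programmed into a DMA descriptor */
	rte_mempool_put(mp, obj);
	return 0;
}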
* The physical address of the elt element.
*/
static inline phys_addr_t
-rte_mempool_virt2phy(const struct rte_mempool *mp, const void *elt)
+rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
{
	if (rte_eal_has_hugepages()) {
- 		uintptr_t off;
-
- 		off = (const char *)elt - (const char *)mp->elt_va_start;
- 		return mp->elt_pa[off >> mp->pg_shift] + (off & mp->pg_mask);
+ 		const struct rte_mempool_objhdr *hdr;
+ 		hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
+ 			sizeof(*hdr));
+ 		return hdr->physaddr;
	} else {
		/*
		 * If huge pages are disabled, we cannot assume the