struct buf_pool_cfg {
void *addr;
/**< The address from where DPAA2 will carve out the buffers */
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
/**< Physical address of the memory provided in addr */
uint32_t num;
/**< Number of buffers */
signed short buf_offset, unsigned int max_buf_count)
{
void *memptr = NULL;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
unsigned int memsz;
struct fpavf_res *fpa = NULL;
uint64_t reg;
static int
octeontx_fpavf_register_memory_area(const struct rte_mempool *mp,
- char *vaddr, phys_addr_t paddr, size_t len)
+ char *vaddr, rte_iova_t paddr, size_t len)
{
struct octeontx_pool_info *pool_info;
hdr = STAILQ_FIRST(&mp->mem_list);
assert(hdr != NULL);
- return (uint64_t)((uintptr_t)hdr->addr - hdr->phys_addr);
+ return (uint64_t)((uintptr_t)hdr->addr - hdr->iova);
}
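The hunk above derives a constant virtual-to-IO offset from the pool's first memory chunk, which is only valid for pools backed by a single IOVA-contiguous chunk. A minimal sketch of how such an offset could be applied, using the renamed field; the helper name is illustrative and not part of the patch:

#include <rte_mempool.h>

/* Illustrative only: translate an object's virtual address into its IO
 * address using a constant offset taken from the pool's first chunk.
 * Assumes the pool is backed by one IOVA-contiguous memory chunk. */
static inline rte_iova_t
example_obj_virt2iova(const struct rte_mempool *mp, const void *obj)
{
        const struct rte_mempool_memhdr *hdr = STAILQ_FIRST(&mp->mem_list);
        uint64_t off = (uintptr_t)hdr->addr - hdr->iova;

        return (rte_iova_t)((uintptr_t)obj - off);
}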
static inline uint16_t
Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];
mr->startPA =
- (uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->phys_addr;
+ (uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->iova;
mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
mr->txQueueBits = index[i];
}
static void
-mempool_add_elem(struct rte_mempool *mp, void *obj, phys_addr_t physaddr)
+mempool_add_elem(struct rte_mempool *mp, void *obj, rte_iova_t iova)
{
struct rte_mempool_objhdr *hdr;
struct rte_mempool_objtlr *tlr __rte_unused;
/* set mempool ptr in header */
hdr = RTE_PTR_SUB(obj, sizeof(*hdr));
hdr->mp = mp;
- hdr->physaddr = physaddr;
+ hdr->iova = iova;
STAILQ_INSERT_TAIL(&mp->elt_list, hdr, next);
mp->populated_size++;
*/
ssize_t
rte_mempool_xmem_usage(__rte_unused void *vaddr, uint32_t elt_num,
- size_t total_elt_sz, const phys_addr_t paddr[], uint32_t pg_num,
+ size_t total_elt_sz, const rte_iova_t iova[], uint32_t pg_num,
uint32_t pg_shift, unsigned int flags)
{
uint32_t elt_cnt = 0;
- phys_addr_t start, end;
- uint32_t paddr_idx;
+ rte_iova_t start, end;
+ uint32_t iova_idx;
size_t pg_sz = (size_t)1 << pg_shift;
unsigned int mask;
/* alignment need one additional object */
elt_num += 1;
- /* if paddr is NULL, assume contiguous memory */
- if (paddr == NULL) {
+ /* if iova is NULL, assume contiguous memory */
+ if (iova == NULL) {
start = 0;
end = pg_sz * pg_num;
- paddr_idx = pg_num;
+ iova_idx = pg_num;
} else {
- start = paddr[0];
- end = paddr[0] + pg_sz;
- paddr_idx = 1;
+ start = iova[0];
+ end = iova[0] + pg_sz;
+ iova_idx = 1;
}
while (elt_cnt < elt_num) {
/* enough contiguous memory, add an object */
if (start + total_elt_sz <= end) {
start += total_elt_sz;
elt_cnt++;
- } else if (paddr_idx < pg_num) {
+ } else if (iova_idx < pg_num) {
/* no room to store one obj, add a page */
- if (end == paddr[paddr_idx]) {
+ if (end == iova[iova_idx]) {
end += pg_sz;
} else {
- start = paddr[paddr_idx];
- end = paddr[paddr_idx] + pg_sz;
+ start = iova[iova_idx];
+ end = iova[iova_idx] + pg_sz;
}
- paddr_idx++;
+ iova_idx++;
} else {
/* no more page, return how many elements fit */
return -(size_t)elt_cnt;
}
}
- return (size_t)paddr_idx << pg_shift;
+ return (size_t)iova_idx << pg_shift;
}
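With the signature above now taking an rte_iova_t page table, a caller sizing an externally provided buffer might look roughly like the sketch below; the element count, element size and 2 MB page shift are made-up values for illustration:

#include <rte_mempool.h>

/* Illustrative sizing call: how many bytes of the described pages would be
 * consumed by 1024 objects of 2048 bytes each (values are examples only). */
static ssize_t
example_xmem_usage(void *vaddr, const rte_iova_t iova[], uint32_t pg_num)
{
        struct rte_mempool_objsz objsz;
        uint32_t total_elt_sz;

        /* header + object + trailer size, as the loop above expects */
        total_elt_sz = rte_mempool_calc_obj_size(2048, 0, &objsz);

        /* 2 MB pages -> pg_shift = 21; a negative return means only that
         * many elements fit in the described memory */
        return rte_mempool_xmem_usage(vaddr, 1024, total_elt_sz,
                                      iova, pg_num, 21, 0);
}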
/* free a memchunk allocated with rte_memzone_reserve() */
memhdr->mp = mp;
memhdr->addr = vaddr;
- memhdr->phys_addr = paddr;
+ memhdr->iova = paddr;
memhdr->len = len;
memhdr->free_cb = free_cb;
memhdr->opaque = opaque;
size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
void *opaque)
{
- phys_addr_t paddr;
+ rte_iova_t iova;
size_t off, phys_len;
int ret, cnt = 0;
for (off = 0; off + pg_sz <= len &&
mp->populated_size < mp->size; off += phys_len) {
- paddr = rte_mem_virt2iova(addr + off);
+ iova = rte_mem_virt2iova(addr + off);
- if (paddr == RTE_BAD_PHYS_ADDR && rte_eal_has_hugepages()) {
+ if (iova == RTE_BAD_IOVA && rte_eal_has_hugepages()) {
ret = -EINVAL;
goto fail;
}
/* populate with the largest group of contiguous pages */
for (phys_len = pg_sz; off + phys_len < len; phys_len += pg_sz) {
- phys_addr_t paddr_tmp;
+ rte_iova_t iova_tmp;
- paddr_tmp = rte_mem_virt2iova(addr + off + phys_len);
+ iova_tmp = rte_mem_virt2iova(addr + off + phys_len);
- if (paddr_tmp != paddr + phys_len)
+ if (iova_tmp != iova + phys_len)
break;
}
- ret = rte_mempool_populate_phys(mp, addr + off, paddr,
+ ret = rte_mempool_populate_phys(mp, addr + off, iova,
phys_len, free_cb, opaque);
if (ret < 0)
goto fail;
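The loop above resolves the IO address of each page and groups IOVA-contiguous runs before handing them to rte_mempool_populate_phys(). A hedged sketch of calling the virtual-address populate path on an anonymous mapping; the helper names are illustrative, error handling is trimmed, and pg_sz is assumed to be the system page size:

#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#include <rte_common.h>
#include <rte_mempool.h>

/* Illustrative free callback: unmap a chunk when the pool is destroyed. */
static void
example_chunk_free(struct rte_mempool_memhdr *memhdr, void *opaque __rte_unused)
{
        munmap(memhdr->addr, memhdr->len);
}

/* Illustrative: back an empty mempool with an anonymous mapping; the
 * populate_virt loop above resolves per-page IO addresses itself.
 * len is assumed to be a multiple of the page size. */
static int
example_populate_from_anon(struct rte_mempool *mp, size_t len)
{
        size_t pg_sz = (size_t)sysconf(_SC_PAGESIZE);
        char *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (addr == MAP_FAILED)
                return -ENOMEM;

        return rte_mempool_populate_virt(mp, addr, len, pg_sz,
                                         example_chunk_free, NULL);
}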
char mz_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
size_t size, total_elt_sz, align, pg_sz, pg_shift;
- phys_addr_t paddr;
+ rte_iova_t iova;
unsigned mz_id, n;
unsigned int mp_flags;
int ret;
}
if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
- paddr = RTE_BAD_PHYS_ADDR;
+ iova = RTE_BAD_IOVA;
else
- paddr = mz->iova;
+ iova = mz->iova;
if (rte_eal_has_hugepages())
ret = rte_mempool_populate_phys(mp, mz->addr,
- paddr, mz->len,
+ iova, mz->len,
rte_mempool_memchunk_mz_free,
(void *)(uintptr_t)mz);
else
rte_mempool_ctor_t *mp_init, void *mp_init_arg,
rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags, void *vaddr,
- const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
+ const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift)
{
struct rte_mempool *mp = NULL;
int ret;
obj_init, obj_init_arg, socket_id, flags);
/* check that we have both VA and PA */
- if (paddr == NULL) {
+ if (iova == NULL) {
rte_errno = EINVAL;
return NULL;
}
if (mp_init)
mp_init(mp, mp_init_arg);
- ret = rte_mempool_populate_phys_tab(mp, vaddr, paddr, pg_num, pg_shift,
+ ret = rte_mempool_populate_phys_tab(mp, vaddr, iova, pg_num, pg_shift,
NULL, NULL);
if (ret < 0 || ret != (int)mp->size)
goto fail;
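For callers of the updated prototype, the only change is the type of the page-address table. A hedged sketch of a call site, with placeholder pool name, counts and a 2 MB page shift; the caller is assumed to supply the buffer, its rte_iova_t table and the page count:

#include <rte_mempool.h>

/* Illustrative call: create a pool over caller-provided memory described
 * by an rte_iova_t page table. */
static struct rte_mempool *
example_xmem_create(void *vaddr, const rte_iova_t iova_tab[], uint32_t pg_num)
{
        return rte_mempool_xmem_create("ext_pool", 1024, 2048,
                                       32, 0,      /* cache, private size */
                                       NULL, NULL, /* no pool constructor */
                                       NULL, NULL, /* no object init */
                                       SOCKET_ID_ANY, 0,
                                       vaddr, iova_tab, pg_num,
                                       21 /* 2 MB pages */);
}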
struct rte_mempool_objhdr {
STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
struct rte_mempool *mp; /**< The mempool owning the object. */
- phys_addr_t physaddr; /**< Physical address of the object. */
+ RTE_STD_C11
+ union {
+ rte_iova_t iova; /**< IO address of the object. */
+ phys_addr_t physaddr; /**< deprecated - Physical address of the object. */
+ };
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
uint64_t cookie; /**< Debug cookie. */
#endif
STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
struct rte_mempool *mp; /**< The mempool owning the chunk */
void *addr; /**< Virtual address of the chunk */
- phys_addr_t phys_addr; /**< Physical address of the chunk */
+ RTE_STD_C11
+ union {
+ rte_iova_t iova; /**< IO address of the chunk */
+ phys_addr_t phys_addr; /**< Physical address of the chunk */
+ };
size_t len; /**< length of the chunk */
rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
void *opaque; /**< Argument passed to the free callback */
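Since rte_mempool_memhdr now exposes the chunk address as iova, with phys_addr kept only as a deprecated alias, code walking the chunk list should read the new member. A small sketch assuming nothing beyond the structure shown above:

#include <inttypes.h>
#include <stdio.h>
#include <rte_mempool.h>

/* Illustrative: dump the IO address and length of every chunk backing a pool. */
static void
example_dump_chunks(const struct rte_mempool *mp)
{
        const struct rte_mempool_memhdr *memhdr;

        STAILQ_FOREACH(memhdr, &mp->mem_list, next) {
                printf("chunk va=%p iova=0x%" PRIx64 " len=%zu\n",
                       memhdr->addr, (uint64_t)memhdr->iova, memhdr->len);
        }
}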
* Notify new memory area to mempool.
*/
typedef int (*rte_mempool_ops_register_memory_area_t)
-(const struct rte_mempool *mp, char *vaddr, phys_addr_t paddr, size_t len);
+(const struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len);
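A mempool driver opting into this hook now receives the area's IO address directly. A minimal sketch of a driver-side handler, under the assumption that the driver only needs to record the mapping; all names are placeholders:

#include <inttypes.h>
#include <stdio.h>
#include <rte_common.h>
#include <rte_mempool.h>

/* Illustrative driver hook: just log the memory area backing the pool.
 * A real driver would program its hardware buffer manager here. */
static int
example_register_memory_area(const struct rte_mempool *mp,
                             char *vaddr, rte_iova_t iova, size_t len)
{
        RTE_SET_USED(mp);
        printf("new area: va=%p iova=0x%" PRIx64 " len=%zu\n",
               (void *)vaddr, (uint64_t)iova, len);
        return 0;
}

A driver would point .register_memory_area at such a handler in its struct rte_mempool_ops and register the ops table with MEMPOOL_REGISTER_OPS(), alongside the mandatory alloc/enqueue/dequeue/get_count callbacks.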
/** Structure defining mempool operations structure */
struct rte_mempool_ops {
* Pointer to the memory pool.
* @param vaddr
* Pointer to the buffer virtual address.
- * @param paddr
- * Pointer to the buffer physical address.
+ * @param iova
+ * IO address of the buffer.
* @param len
* Pool size.
* @return
*/
int
rte_mempool_ops_register_memory_area(const struct rte_mempool *mp,
- char *vaddr, phys_addr_t paddr, size_t len);
+ char *vaddr, rte_iova_t iova, size_t len);
/**
* @internal wrapper for mempool_ops free callback.
* @param vaddr
* Virtual address of the externally allocated memory buffer.
* Will be used to store mempool objects.
- * @param paddr
- * Array of physical addresses of the pages that comprises given memory
- * buffer.
+ * @param iova
+ * Array of IO addresses of the pages that comprise the given memory buffer.
* @param pg_num
- * Number of elements in the paddr array.
+ * Number of elements in the iova array.
* @param pg_shift
* LOG2 of the physical pages size.
* @return
rte_mempool_ctor_t *mp_init, void *mp_init_arg,
rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags, void *vaddr,
- const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
+ const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift);
/**
* Create an empty mempool
const struct rte_mempool_objhdr *hdr;
hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
sizeof(*hdr));
- return hdr->physaddr;
+ return hdr->iova;
}
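Object headers now carry the IO address, so the per-object lookup above resolves to hdr->iova. A hedged usage sketch with rte_mempool_virt2iova(), introduced in the same release; error handling is kept minimal:

#include <errno.h>
#include <rte_mempool.h>

/* Illustrative: allocate one object, obtain its IO address (e.g. for a
 * DMA descriptor), then return the object to the pool. */
static int
example_obj_iova(struct rte_mempool *mp)
{
        void *obj;
        rte_iova_t iova;

        if (rte_mempool_get(mp, &obj) < 0)
                return -ENOBUFS;

        iova = rte_mempool_virt2iova(obj);
        /* ... program 'iova' into a device descriptor here ... */

        rte_mempool_put(mp, obj);
        return (iova == RTE_BAD_IOVA) ? -EINVAL : 0;
}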
/**
* @param total_elt_sz
* The size of each element, including header and trailer, as returned
* by rte_mempool_calc_obj_size().
- * @param paddr
- * Array of physical addresses of the pages that comprises given memory
- * buffer.
+ * @param iova
+ * Array of IO addresses of the pages that comprise the given memory buffer.
* @param pg_num
- * Number of elements in the paddr array.
+ * Number of elements in the iova array.
* @param pg_shift
* LOG2 of the physical pages size.
* @param flags
* is the actual number of elements that can be stored in that buffer.
*/
ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num,
- size_t total_elt_sz, const phys_addr_t paddr[], uint32_t pg_num,
+ size_t total_elt_sz, const rte_iova_t iova[], uint32_t pg_num,
uint32_t pg_shift, unsigned int flags);
/**
/* wrapper to notify new memory area to external mempool */
int
rte_mempool_ops_register_memory_area(const struct rte_mempool *mp, char *vaddr,
- phys_addr_t paddr, size_t len)
+ rte_iova_t iova, size_t len)
{
struct rte_mempool_ops *ops;
ops = rte_mempool_get_ops(mp->ops_index);
RTE_FUNC_PTR_OR_ERR_RET(ops->register_memory_area, -ENOTSUP);
- return ops->register_memory_area(mp, vaddr, paddr, len);
+ return ops->register_memory_area(mp, vaddr, iova, len);
}
/* sets mempool ops previously registered by rte_mempool_register_ops. */