memset(&mcc->initfq.fqd.context_a, 0,
sizeof(mcc->initfq.fqd.context_a));
} else {
- phys_fq = rte_mem_virt2phy(fq);
+ phys_fq = rte_mem_virt2iova(fq);
qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
}
}
#define EQCR_CARRYCLEAR(p) \
(void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6)))
-extern dma_addr_t rte_mem_virt2phy(const void *addr);
+extern dma_addr_t rte_mem_virt2iova(const void *addr);
/* Bit-wise logic to convert a ring pointer to a ring index */
static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e)
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
- "Using rte_mem_virt2phy()\n");
- mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map address to physical memory\n");
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
- "Using rte_mem_virt2phy()\n");
- mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map address to physical memory\n");
if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
- rte_mem_virt2phy(vlan_table));
+ rte_mem_virt2iova(vlan_table));
req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
}
req.mask = rte_cpu_to_le_32(mask);
req.fid = rte_cpu_to_le_16(fid);
req.vlan_tag_mask_tbl_addr =
- rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
+ rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
}
rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
bp->hwrm_cmd_resp_dma_addr =
- rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+ rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
RTE_LOG(ERR, PMD,
"Unable to map response buffer to physical memory.\n");
}
rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
bp->hwrm_short_cmd_req_dma_addr =
- rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
+ rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
if (bp->hwrm_short_cmd_req_dma_addr == 0) {
rte_free(bp->hwrm_short_cmd_req_addr);
RTE_LOG(ERR, PMD,
if (bp->hwrm_cmd_resp_addr == NULL)
return -ENOMEM;
bp->hwrm_cmd_resp_dma_addr =
- rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+ rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map response address to physical memory\n");
page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
req.req_buf_page_addr[0] =
- rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
+ rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
if (req.req_buf_page_addr[0] == 0) {
RTE_LOG(ERR, PMD,
"unable to map buffer address to physical memory\n");
rte_mem_lock_page(buf);
if (buf == NULL)
return -ENOMEM;
- dma_handle = rte_mem_virt2phy(buf);
+ dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
RTE_LOG(ERR, PMD,
"unable to map response address to physical memory\n");
if (!buf)
return -ENOMEM;
- dma_handle = rte_mem_virt2phy(buf);
+ dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
RTE_LOG(ERR, PMD,
"unable to map response address to physical memory\n");
if (!buf)
return -ENOMEM;
- dma_handle = rte_mem_virt2phy(buf);
+ dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
RTE_LOG(ERR, PMD,
"unable to map response address to physical memory\n");
req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
- req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
+ req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
if (req.vnic_id_tbl_addr == 0) {
HWRM_UNLOCK();
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
- "Using rte_mem_virt2phy()\n");
+ "Using rte_mem_virt2iova()\n");
for (sz = 0; sz < total_alloc_len; sz += getpagesize())
rte_mem_lock_page(((char *)mz->addr) + sz);
- mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map ring address to physical memory\n");
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
- "Using rte_mem_virt2phy()\n");
- mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map vnic address to physical memory\n");
m = m->next;
}
- phyaddr = rte_mem_virt2phy(g->sg);
+ phyaddr = rte_mem_virt2iova(g->sg);
if (phyaddr == RTE_BAD_PHYS_ADDR) {
PMD_TX_LOG(lio_dev, ERR, "bad phys addr\n");
goto xmit_failed;
__rte_crypto_op_reset(op, type);
- op->phys_addr = rte_mem_virt2phy(_op_data);
+ op->phys_addr = rte_mem_virt2iova(_op_data);
op->mempool = mempool;
}
(void)virtaddr;
return RTE_BAD_IOVA;
}
+rte_iova_t
+rte_mem_virt2iova(const void *virtaddr)
+{
+ return rte_mem_virt2phy(virtaddr);
+}
int
rte_eal_hugepage_init(void)
*/
phys_addr_t rte_mem_virt2phy(const void *virt);
+/**
+ * Get IO virtual address of any mapped virtual address in the current process.
+ *
+ * @param virt
+ * The virtual address.
+ * @return
+ * The IO address or RTE_BAD_IOVA on error.
+ */
+rte_iova_t rte_mem_virt2iova(const void *virt);
+
/**
* Get the layout of the available physical memory.
*
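As a usage sketch for the rte_mem_virt2iova() API documented above: the caller passes any mapped virtual address and checks for RTE_BAD_IOVA. The helper name, the rte_malloc() buffer and the return conventions here are illustrative assumptions, not part of the patch.

#include <rte_malloc.h>
#include <rte_memory.h>

/* Illustrative sketch: look up the IOVA backing an rte_malloc'd buffer. */
static int
example_buf_iova(size_t len)
{
	void *buf = rte_malloc(NULL, len, 0);
	rte_iova_t iova;

	if (buf == NULL)
		return -1;

	iova = rte_mem_virt2iova(buf);
	if (iova == RTE_BAD_IOVA) {
		rte_free(buf);
		return -1;
	}

	/* In RTE_IOVA_VA mode iova equals (uintptr_t)buf; in physical
	 * address mode it comes from the pagemap lookup. */
	rte_free(buf);
	return 0;
}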
int page_size;
off_t offset;
- if (rte_eal_iova_mode() == RTE_IOVA_VA)
- return (uintptr_t)virtaddr;
-
/* Cannot parse /proc/self/pagemap, no need to log errors everywhere */
if (!phys_addrs_available)
return RTE_BAD_IOVA;
return physaddr;
}
+rte_iova_t
+rte_mem_virt2iova(const void *virtaddr)
+{
+ if (rte_eal_iova_mode() == RTE_IOVA_VA)
+ return (uintptr_t)virtaddr;
+ return rte_mem_virt2phy(virtaddr);
+}
+
/*
* For each hugepage in hugepg_tbl, fill the physaddr value. We find
* it by browsing the /proc/self/pagemap special file.
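The pagemap walk referred to in the comment above reduces to the arithmetic below. This is a simplified sketch with illustrative names and trimmed error handling, based on the documented /proc/self/pagemap layout of one 64-bit entry per virtual page, with the PFN in bits 0-54 and the "present" flag in bit 63.

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

/* Simplified sketch of the /proc/self/pagemap lookup. */
static uint64_t
pagemap_lookup(const void *virt)
{
	long page_size = sysconf(_SC_PAGESIZE);
	off_t off = ((uintptr_t)virt / page_size) * sizeof(uint64_t);
	uint64_t entry;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return UINT64_MAX;
	if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry)) {
		close(fd);
		return UINT64_MAX;
	}
	close(fd);

	if (!(entry & (1ULL << 63)))	/* page not present */
		return UINT64_MAX;

	/* Physical address = PFN * page size + offset within the page. */
	return (entry & ((1ULL << 55) - 1)) * page_size
		+ ((uintptr_t)virt % page_size);
}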
rte_eal_using_phys_addrs;
rte_eal_vfio_intr_mode;
rte_lcore_has_role;
+ rte_mem_virt2iova;
rte_memcpy_ptr;
rte_vfio_enable;
rte_vfio_is_enabled;
for (off = 0; off + pg_sz <= len &&
mp->populated_size < mp->size; off += phys_len) {
- paddr = rte_mem_virt2phy(addr + off);
+ paddr = rte_mem_virt2iova(addr + off);
if (paddr == RTE_BAD_PHYS_ADDR && rte_eal_has_hugepages()) {
ret = -EINVAL;
for (phys_len = pg_sz; off + phys_len < len; phys_len += pg_sz) {
phys_addr_t paddr_tmp;
- paddr_tmp = rte_mem_virt2phy(addr + off + phys_len);
+ paddr_tmp = rte_mem_virt2iova(addr + off + phys_len);
if (paddr_tmp != paddr + phys_len)
break;
uint64_t host_phys_addr;
uint64_t size;
- host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)host_user_addr);
+ host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
size = page_size - (guest_phys_addr & (page_size - 1));
size = RTE_MIN(size, reg_size);
while (reg_size > 0) {
size = RTE_MIN(reg_size, page_size);
- host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)
+ host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
host_user_addr);
add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);
MEMPOOL_HEADER_SIZE(mp, mp->cache_size))
GOTO_ERR(ret, out);
-#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2phy() not supported on bsd */
+#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2iova() not supported on bsd */
printf("get physical address of an object\n");
- if (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2phy(obj))
+ if (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2iova(obj))
GOTO_ERR(ret, out);
#endif