mem: rename address mapping function to IOVA
author    Thomas Monjalon <thomas@monjalon.net>
          Sat, 4 Nov 2017 16:15:04 +0000 (17:15 +0100)
committer Thomas Monjalon <thomas@monjalon.net>
          Mon, 6 Nov 2017 21:24:19 +0000 (22:24 +0100)
The function rte_mem_virt2phy() is kept and used in functions which
work only with physical addresses.
All other calls are replaced by rte_mem_virt2iova(),
which does a direct mapping (no conversion) in the VA case.

Note: the new function rte_mem_virt2iova() matches the behaviour
implemented in rte_mem_virt2phy() by commit
680f6c12600f ("mem: honor IOVA mode in virt2phy")

Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
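
For illustration, a minimal caller sketch (not part of this commit;
get_dma_addr() is a hypothetical helper assuming only rte_mem_virt2iova(),
rte_iova_t and RTE_BAD_IOVA as seen in the hunks below):

/* Hypothetical helper (illustration only): translate a process virtual
 * address into a DMA-able IO address.  In RTE_IOVA_VA mode the IOVA is
 * the virtual address itself (direct mapping); in RTE_IOVA_PA mode it
 * is the physical address found via the pagemap lookup. */
#include <rte_memory.h>

static int
get_dma_addr(const void *buf, rte_iova_t *iova)
{
	*iova = rte_mem_virt2iova(buf);
	if (*iova == RTE_BAD_IOVA)
		return -1; /* translation failed */
	return 0;
}
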
15 files changed:
drivers/bus/dpaa/base/qbman/qman.c
drivers/bus/dpaa/base/qbman/qman.h
drivers/net/bnxt/bnxt_ethdev.c
drivers/net/bnxt/bnxt_hwrm.c
drivers/net/bnxt/bnxt_ring.c
drivers/net/bnxt/bnxt_vnic.c
drivers/net/liquidio/lio_rxtx.c
lib/librte_cryptodev/rte_cryptodev.c
lib/librte_eal/bsdapp/eal/eal_memory.c
lib/librte_eal/common/include/rte_memory.h
lib/librte_eal/linuxapp/eal/eal_memory.c
lib/librte_eal/rte_eal_version.map
lib/librte_mempool/rte_mempool.c
lib/librte_vhost/vhost_user.c
test/test/test_mempool.c

diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c
index 8c8d270f86e36c136edfe5d91ae7f1cf6276c8c2..87fec60d1a6028e09bc6fde2c66f72a409411ce2 100644
@@ -1351,7 +1351,7 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
                        memset(&mcc->initfq.fqd.context_a, 0,
                               sizeof(mcc->initfq.fqd.context_a));
                } else {
-                       phys_fq = rte_mem_virt2phy(fq);
+                       phys_fq = rte_mem_virt2iova(fq);
                        qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
                }
        }
diff --git a/drivers/bus/dpaa/base/qbman/qman.h b/drivers/bus/dpaa/base/qbman/qman.h
index 7c645f478f6d97353ad03ebdc5683c540ca13a20..2c0f694cd6ea3de860250a72565144c662671b25 100644
@@ -240,7 +240,7 @@ struct qm_portal {
 #define EQCR_CARRYCLEAR(p) \
        (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6)))
 
-extern dma_addr_t rte_mem_virt2phy(const void *addr);
+extern dma_addr_t rte_mem_virt2iova(const void *addr);
 
 /* Bit-wise logic to convert a ring pointer to a ring index */
 static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e)
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index b6c1daec74f9ca1ea4a2dcb8fbeea32159133bc6..9dc3901e12c9b5f42c9ff584323950d43c053b03 100644
@@ -2852,8 +2852,8 @@ skip_init:
                        RTE_LOG(WARNING, PMD,
                                "Memzone physical address same as virtual.\n");
                        RTE_LOG(WARNING, PMD,
-                               "Using rte_mem_virt2phy()\n");
-                       mz_phys_addr = rte_mem_virt2phy(mz->addr);
+                               "Using rte_mem_virt2iova()\n");
+                       mz_phys_addr = rte_mem_virt2iova(mz->addr);
                        if (mz_phys_addr == 0) {
                                RTE_LOG(ERR, PMD,
                                "unable to map address to physical memory\n");
@@ -2887,8 +2887,8 @@ skip_init:
                        RTE_LOG(WARNING, PMD,
                                "Memzone physical address same as virtual.\n");
                        RTE_LOG(WARNING, PMD,
-                               "Using rte_mem_virt2phy()\n");
-                       mz_phys_addr = rte_mem_virt2phy(mz->addr);
+                               "Using rte_mem_virt2iova()\n");
+                       mz_phys_addr = rte_mem_virt2iova(mz->addr);
                        if (mz_phys_addr == 0) {
                                RTE_LOG(ERR, PMD,
                                "unable to map address to physical memory\n");
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 6e0e40f82b8103c198edb074bc10aae2a99eaf8e..1fdc51cba8482b80eecb9bd835260b7551b79263 100644
@@ -277,7 +277,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
-                        rte_mem_virt2phy(vlan_table));
+                        rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);
@@ -318,7 +318,7 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
        req.fid = rte_cpu_to_le_16(fid);
 
        req.vlan_tag_mask_tbl_addr =
-               rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
+               rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -644,7 +644,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
-                       rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+                       rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "Unable to map response buffer to physical memory.\n");
@@ -670,7 +670,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
-                       rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
+                       rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        RTE_LOG(ERR, PMD,
@@ -1753,7 +1753,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
        bp->hwrm_cmd_resp_dma_addr =
-               rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+               rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
        if (bp->hwrm_cmd_resp_dma_addr == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
@@ -2622,7 +2622,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
                         page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
        req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
        req.req_buf_page_addr[0] =
-               rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
+               rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
        if (req.req_buf_page_addr[0] == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map buffer address to physical memory\n");
@@ -3044,7 +3044,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
        rte_mem_lock_page(buf);
        if (buf == NULL)
                return -ENOMEM;
-       dma_handle = rte_mem_virt2phy(buf);
+       dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
@@ -3080,7 +3080,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
        if (!buf)
                return -ENOMEM;
 
-       dma_handle = rte_mem_virt2phy(buf);
+       dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
@@ -3141,7 +3141,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
        if (!buf)
                return -ENOMEM;
 
-       dma_handle = rte_mem_virt2phy(buf);
+       dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
@@ -3196,7 +3196,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
 
        req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
        req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
-       req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
+       req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
 
        if (req.vnic_id_tbl_addr == 0) {
                HWRM_UNLOCK();
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 583c82569d4e64df3b69429fd7e83e8e2f581d71..efec2048ed266956ac7f19c01ed66c158e8188a9 100644
@@ -177,10 +177,10 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
                RTE_LOG(WARNING, PMD,
                        "Memzone physical address same as virtual.\n");
                RTE_LOG(WARNING, PMD,
-                       "Using rte_mem_virt2phy()\n");
+                       "Using rte_mem_virt2iova()\n");
                for (sz = 0; sz < total_alloc_len; sz += getpagesize())
                        rte_mem_lock_page(((char *)mz->addr) + sz);
-               mz_phys_addr = rte_mem_virt2phy(mz->addr);
+               mz_phys_addr = rte_mem_virt2iova(mz->addr);
                if (mz_phys_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "unable to map ring address to physical memory\n");
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 6f7c05bdfd2a6be21d1918b6ef43d4d2c72dbf2f..cbcf9920af8e23622f3ade77bbc5ac4be2acd074 100644
@@ -197,8 +197,8 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
                RTE_LOG(WARNING, PMD,
                        "Memzone physical address same as virtual.\n");
                RTE_LOG(WARNING, PMD,
-                       "Using rte_mem_virt2phy()\n");
-               mz_phys_addr = rte_mem_virt2phy(mz->addr);
+                       "Using rte_mem_virt2iova()\n");
+               mz_phys_addr = rte_mem_virt2iova(mz->addr);
                if (mz_phys_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "unable to map vnic address to physical memory\n");
diff --git a/drivers/net/liquidio/lio_rxtx.c b/drivers/net/liquidio/lio_rxtx.c
index 2bbb893c27e423e49d12b027ae3c450d40c9b381..773dfd30a13b77c4c37689e3f085afd14f0919c7 100644
@@ -1790,7 +1790,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
                                m = m->next;
                        }
 
-                       phyaddr = rte_mem_virt2phy(g->sg);
+                       phyaddr = rte_mem_virt2iova(g->sg);
                        if (phyaddr == RTE_BAD_PHYS_ADDR) {
                                PMD_TX_LOG(lio_dev, ERR, "bad phys addr\n");
                                goto xmit_failed;
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 80632117e088a0d9bd8b7c1b9fb068d2f06ac0b5..e5f287634a97cfd561a61198fcb9a365d6eec5f8 100644
@@ -1284,7 +1284,7 @@ rte_crypto_op_init(struct rte_mempool *mempool,
 
        __rte_crypto_op_reset(op, type);
 
-       op->phys_addr = rte_mem_virt2phy(_op_data);
+       op->phys_addr = rte_mem_virt2iova(_op_data);
        op->mempool = mempool;
 }
 
diff --git a/lib/librte_eal/bsdapp/eal/eal_memory.c b/lib/librte_eal/bsdapp/eal/eal_memory.c
index 66fab768f717cb03444f50c0f025a031373a6d59..0e021fff1dba885de1a55feb0f9c9d1e5cb2353b 100644
@@ -58,6 +58,11 @@ rte_mem_virt2phy(const void *virtaddr)
        (void)virtaddr;
        return RTE_BAD_IOVA;
 }
+rte_iova_t
+rte_mem_virt2iova(const void *virtaddr)
+{
+       return rte_mem_virt2phy(virtaddr);
+}
 
 int
 rte_eal_hugepage_init(void)
diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index d16b284b50fde9b14acd4dc7d894f67decbcb048..14aacea545e82cbf983a70fec80ec00d9946ea97 100644
@@ -147,6 +147,16 @@ int rte_mem_lock_page(const void *virt);
  */
 phys_addr_t rte_mem_virt2phy(const void *virt);
 
+/**
+ * Get IO virtual address of any mapped virtual address in the current process.
+ *
+ * @param virt
+ *   The virtual address.
+ * @return
+ *   The IO address or RTE_BAD_IOVA on error.
+ */
+rte_iova_t rte_mem_virt2iova(const void *virt);
+
 /**
  * Get the layout of the available physical memory.
  *
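
To make the documented contract above concrete, a hedged sketch (assumed
test code, not from this commit; check_iova_contract() is hypothetical) of
what rte_mem_virt2iova() guarantees in each IOVA mode:

/* Assumed behaviour check: in RTE_IOVA_VA mode the IOVA of a mapped
 * address is the address itself, while rte_mem_virt2phy() still
 * performs the physical lookup; in RTE_IOVA_PA mode both agree. */
#include <stdint.h>
#include <rte_eal.h>
#include <rte_memory.h>
#include <rte_debug.h>

static void
check_iova_contract(const void *virt)
{
	rte_iova_t iova = rte_mem_virt2iova(virt);

	if (rte_eal_iova_mode() == RTE_IOVA_VA)
		RTE_VERIFY(iova == (uintptr_t)virt);
	else
		RTE_VERIFY(iova == rte_mem_virt2phy(virt));
}
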
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 284758ac40bd7e6c356c4c7cb867ab820813881b..a54b822abe7025cf9315b29c6099318b9d7c436f 100644
@@ -128,9 +128,6 @@ rte_mem_virt2phy(const void *virtaddr)
        int page_size;
        off_t offset;
 
-       if (rte_eal_iova_mode() == RTE_IOVA_VA)
-               return (uintptr_t)virtaddr;
-
        /* Cannot parse /proc/self/pagemap, no need to log errors everywhere */
        if (!phys_addrs_available)
                return RTE_BAD_IOVA;
@@ -180,6 +177,14 @@ rte_mem_virt2phy(const void *virtaddr)
        return physaddr;
 }
 
+rte_iova_t
+rte_mem_virt2iova(const void *virtaddr)
+{
+       if (rte_eal_iova_mode() == RTE_IOVA_VA)
+               return (uintptr_t)virtaddr;
+       return rte_mem_virt2phy(virtaddr);
+}
+
 /*
  * For each hugepage in hugepg_tbl, fill the physaddr value. We find
  * it by browsing the /proc/self/pagemap special file.
diff --git a/lib/librte_eal/rte_eal_version.map b/lib/librte_eal/rte_eal_version.map
index e643202b4f60bea075a8a32ac24f23fc8f9ef9fe..5635adb4b937beef5bb8b2c7bc5eac56a4b1eaa5 100644
@@ -233,6 +233,7 @@ DPDK_17.11 {
        rte_eal_using_phys_addrs;
        rte_eal_vfio_intr_mode;
        rte_lcore_has_role;
+       rte_mem_virt2iova;
        rte_memcpy_ptr;
        rte_vfio_enable;
        rte_vfio_is_enabled;
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 6357fd48e7315f389a1f800137669179dda3f924..f62054e2379b8dca050b3605e733183f9165f45d 100644
@@ -503,7 +503,7 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
        for (off = 0; off + pg_sz <= len &&
                     mp->populated_size < mp->size; off += phys_len) {
 
-               paddr = rte_mem_virt2phy(addr + off);
+               paddr = rte_mem_virt2iova(addr + off);
 
                if (paddr == RTE_BAD_PHYS_ADDR && rte_eal_has_hugepages()) {
                        ret = -EINVAL;
@@ -514,7 +514,7 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
                for (phys_len = pg_sz; off + phys_len < len; phys_len += pg_sz) {
                        phys_addr_t paddr_tmp;
 
-                       paddr_tmp = rte_mem_virt2phy(addr + off + phys_len);
+                       paddr_tmp = rte_mem_virt2iova(addr + off + phys_len);
 
                        if (paddr_tmp != paddr + phys_len)
                                break;
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 1f6cba4b942572d7a997cb0f549487d5c31e46b7..97a5c3f1c7050404d7498e01e0e39b2588ec137c 100644
@@ -526,7 +526,7 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
        uint64_t host_phys_addr;
        uint64_t size;
 
-       host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)host_user_addr);
+       host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
        size = page_size - (guest_phys_addr & (page_size - 1));
        size = RTE_MIN(size, reg_size);
 
@@ -537,7 +537,7 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
 
        while (reg_size > 0) {
                size = RTE_MIN(reg_size, page_size);
-               host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)
+               host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
                                                  host_user_addr);
                add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);
 
diff --git a/test/test/test_mempool.c b/test/test/test_mempool.c
index dee73d6583917b7c5235f276e777d24fe00087b6..fa8bad39f8f79457b048e48ffee3e9b1f92d3b55 100644
@@ -144,9 +144,9 @@ test_mempool_basic(struct rte_mempool *mp, int use_external_cache)
                        MEMPOOL_HEADER_SIZE(mp, mp->cache_size))
                GOTO_ERR(ret, out);
 
-#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2phy() not supported on bsd */
+#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2iova() not supported on bsd */
        printf("get physical address of an object\n");
-       if (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2phy(obj))
+       if (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2iova(obj))
                GOTO_ERR(ret, out);
 #endif