From 0de9eb6138bc41a1b435640bc40c23fa02f1c932 Mon Sep 17 00:00:00 2001
From: Alejandro Lucero
Date: Thu, 1 Nov 2018 19:53:25 +0000
Subject: [PATCH] mem: rename DMA mask check with proper prefix

The current name, rte_eal_check_dma_mask, does not follow the naming
convention used in the rest of the file.

Signed-off-by: Alejandro Lucero
Tested-by: Ferruh Yigit
---
 doc/guides/rel_notes/release_18_11.rst     | 2 +-
 drivers/bus/pci/linux/pci.c                | 2 +-
 drivers/net/nfp/nfp_net.c                  | 2 +-
 lib/librte_eal/common/eal_common_memory.c  | 4 ++--
 lib/librte_eal/common/include/rte_memory.h | 2 +-
 lib/librte_eal/common/malloc_heap.c        | 2 +-
 lib/librte_eal/rte_eal_version.map         | 2 +-
 7 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/doc/guides/rel_notes/release_18_11.rst b/doc/guides/rel_notes/release_18_11.rst
index 376128f684..11a27405cc 100644
--- a/doc/guides/rel_notes/release_18_11.rst
+++ b/doc/guides/rel_notes/release_18_11.rst
@@ -63,7 +63,7 @@ New Features
 * **Added check for ensuring allocated memory addressable by devices.**
 
   Some devices can have addressing limitations so a new function,
-  ``rte_eal_check_dma_mask``, has been added for checking allocated memory is
+  ``rte_mem_check_dma_mask``, has been added for checking allocated memory is
   not out of the device range. Because now memory can be dynamically allocated
   after initialization, a dma mask is kept and any new allocated memory will be
   checked out against that dma mask and rejected if out of range. If more than
diff --git a/drivers/bus/pci/linux/pci.c b/drivers/bus/pci/linux/pci.c
index 1783ec7f53..7576b0e739 100644
--- a/drivers/bus/pci/linux/pci.c
+++ b/drivers/bus/pci/linux/pci.c
@@ -615,7 +615,7 @@ pci_one_device_iommu_support_va(struct rte_pci_device *dev)
 	mgaw = ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1;
 
-	return rte_eal_check_dma_mask(mgaw) == 0 ? true : false;
+	return rte_mem_check_dma_mask(mgaw) == 0 ? true : false;
 }
 #elif defined(RTE_ARCH_PPC_64)
 static bool
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index bab1f68eba..54c6da9243 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -2703,7 +2703,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 
 	/* NFP can not handle DMA addresses requiring more than 40 bits */
-	if (rte_eal_check_dma_mask(40)) {
+	if (rte_mem_check_dma_mask(40)) {
 		RTE_LOG(ERR, PMD, "device %s can not be used:",
 				   pci_dev->device.name);
 		RTE_LOG(ERR, PMD, "\trestricted dma mask to 40 bits!\n");
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index 12dcedf5c4..e0f08f39a6 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -49,7 +49,7 @@ static uint64_t system_page_sz;
  * Current known limitations are 39 or 40 bits. Setting the starting address
  * at 4GB implies there are 508GB or 1020GB for mapping the available
  * hugepages. This is likely enough for most systems, although a device with
- * addressing limitations should call rte_eal_check_dma_mask for ensuring all
+ * addressing limitations should call rte_mem_check_dma_mask for ensuring all
  * memory is within supported range.
  */
 static uint64_t baseaddr = 0x100000000;
@@ -447,7 +447,7 @@ check_iova(const struct rte_memseg_list *msl __rte_unused,
 
 /* check memseg iovas are within the required range based on dma mask */
 int __rte_experimental
-rte_eal_check_dma_mask(uint8_t maskbits)
+rte_mem_check_dma_mask(uint8_t maskbits)
 {
 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	uint64_t mask;
diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index ce9370582c..ad3f3cfb01 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -464,7 +464,7 @@ unsigned rte_memory_get_nchannel(void);
 unsigned rte_memory_get_nrank(void);
 
 /* check memsegs iovas are within a range based on dma mask */
-int __rte_experimental rte_eal_check_dma_mask(uint8_t maskbits);
+int __rte_experimental rte_mem_check_dma_mask(uint8_t maskbits);
 
 /**
  * Drivers based on uio will not load unless physical
diff --git a/lib/librte_eal/common/malloc_heap.c b/lib/librte_eal/common/malloc_heap.c
index d1019e3cd5..4997c5ef5d 100644
--- a/lib/librte_eal/common/malloc_heap.c
+++ b/lib/librte_eal/common/malloc_heap.c
@@ -322,7 +322,7 @@ alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
 	}
 
 	if (mcfg->dma_maskbits) {
-		if (rte_eal_check_dma_mask(mcfg->dma_maskbits)) {
+		if (rte_mem_check_dma_mask(mcfg->dma_maskbits)) {
 			RTE_LOG(ERR, EAL,
 				"%s(): couldn't allocate memory due to DMA mask\n",
 				__func__);
diff --git a/lib/librte_eal/rte_eal_version.map b/lib/librte_eal/rte_eal_version.map
index 04f6242460..4eb16ee3be 100644
--- a/lib/librte_eal/rte_eal_version.map
+++ b/lib/librte_eal/rte_eal_version.map
@@ -295,7 +295,6 @@ EXPERIMENTAL {
 	rte_devargs_parsef;
 	rte_devargs_remove;
 	rte_devargs_type_count;
-	rte_eal_check_dma_mask;
 	rte_eal_cleanup;
 	rte_fbarray_attach;
 	rte_fbarray_destroy;
@@ -331,6 +330,7 @@ EXPERIMENTAL {
 	rte_malloc_heap_socket_is_external;
 	rte_mem_alloc_validator_register;
 	rte_mem_alloc_validator_unregister;
+	rte_mem_check_dma_mask;
 	rte_mem_event_callback_register;
 	rte_mem_event_callback_unregister;
 	rte_mem_iova2virt;
-- 
2.20.1
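
Usage note (illustrative, not part of the patch): the sketch below shows how
a PMD with a limited DMA addressing range could call the renamed function at
init time, modeled on the nfp_net.c hunk above. rte_mem_check_dma_mask()
returns 0 when every allocated memseg IOVA fits within the given number of
address bits, and non-zero otherwise. The wrapper function, its name and the
-ENODEV return are hypothetical; the 40-bit limit is taken from the NFP
example. The symbol is still experimental, so callers must be built with
ALLOW_EXPERIMENTAL_API.

	#include <errno.h>

	#include <rte_log.h>
	#include <rte_memory.h>

	/* Hypothetical init-time check for a device limited to 40-bit
	 * DMA addresses, mirroring what nfp_net_init() does above.
	 */
	static int
	example_dma_mask_check(const char *dev_name)
	{
		/* Non-zero means some allocated memory is outside the
		 * device's 40-bit reach, so refuse to use the device.
		 */
		if (rte_mem_check_dma_mask(40)) {
			RTE_LOG(ERR, PMD,
				"device %s can not be used: restricted dma mask to 40 bits!\n",
				dev_name);
			return -ENODEV;
		}

		return 0;
	}

The rte_mem_ prefix lines up with the neighbouring symbols visible in the
version.map hunk (rte_mem_alloc_validator_register,
rte_mem_event_callback_register, rte_mem_iova2virt), which is the naming
consistency the rename is after.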