* **Added check for ensuring allocated memory is addressable by devices.**
Some devices can have addressing limitations, so a new function,
- ``rte_eal_check_dma_mask``, has been added for checking that allocated memory is
+ ``rte_mem_check_dma_mask``, has been added for checking that allocated memory is
not out of the device range. Because memory can now be dynamically allocated
after initialization, a DMA mask is kept and any newly allocated memory will be
checked against that DMA mask and rejected if out of range. If more than
one device has addressing limitations, the most restrictive DMA mask is used.
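As an illustration, a driver with a known addressing limit can reject probing early. A minimal sketch, assuming a hypothetical device limited to 44-bit DMA (the NFP hunk further below shows the real 40-bit case):

/* hypothetical probe-time check for a device limited to 44-bit DMA */
if (rte_mem_check_dma_mask(44) != 0) {
	RTE_LOG(ERR, PMD, "allocated memory exceeds the 44-bit DMA range\n");
	return -ENODEV;
}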
mgaw = ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1;
- return rte_eal_check_dma_mask(mgaw) == 0 ? true : false;
+ return rte_mem_check_dma_mask(mgaw) == 0 ? true : false;
}
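For reference, MGAW is the maximum-guest-address-width field of the VT-d capability register (bits 21:16), which hardware reports as the supported width minus one, hence the ``+ 1`` above. A standalone sketch of the decode; the constants restate what the surrounding file is assumed to define, and ``vtd_mgaw_bits`` is a hypothetical helper:

#define VTD_CAP_MGAW_SHIFT	16
#define VTD_CAP_MGAW_MASK	(0x3fULL << VTD_CAP_MGAW_SHIFT)

/* extract the MGAW field and add one to get the usable width in bits */
static uint8_t
vtd_mgaw_bits(uint64_t vtd_cap_reg)
{
	return ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1;
}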
#elif defined(RTE_ARCH_PPC_64)
static bool
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
/* NFP cannot handle DMA addresses requiring more than 40 bits */
- if (rte_eal_check_dma_mask(40)) {
+ if (rte_mem_check_dma_mask(40)) {
RTE_LOG(ERR, PMD, "device %s can not be used:",
pci_dev->device.name);
RTE_LOG(ERR, PMD, "\trestricted dma mask to 40 bits!\n");
* Current known limitations are 39 or 40 bits. Setting the starting address
* at 4GB implies there are 508GB or 1020GB for mapping the available
* hugepages. This is likely enough for most systems, although a device with
- * addressing limitations should call rte_eal_check_dma_mask for ensuring all
+ * addressing limitations should call rte_mem_check_dma_mask to ensure all
* memory is within the supported range.
*/
static uint64_t baseaddr = 0x100000000;
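The figures in the comment above follow from simple arithmetic: a 39-bit mask spans 2^39 bytes = 512GB, and starting the mappings at the 4GB boundary leaves 512 - 4 = 508GB; a 40-bit mask spans 2^40 bytes = 1024GB, leaving 1024 - 4 = 1020GB.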
/* check memseg iovas are within the required range based on dma mask */
int __rte_experimental
-rte_eal_check_dma_mask(uint8_t maskbits)
+rte_mem_check_dma_mask(uint8_t maskbits)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
uint64_t mask;
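The hunk stops before the body of the check; a minimal sketch of how it might continue, assuming a hypothetical ``check_iova`` memseg callback and omitting the sanity check that keeps ``maskbits`` below 64:

	/* ones above bit (maskbits - 1): any IOVA intersecting this mask
	 * lies outside what the device can address */
	mask = ~((1ULL << maskbits) - 1);

	/* walk all memsegs; the hypothetical check_iova callback returns
	 * non-zero for a segment whose IOVA has bits set in the mask */
	if (rte_memseg_walk(check_iova, &mask))
		return -1;

	return 0;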
unsigned rte_memory_get_nrank(void);
/* check memseg iovas are within a range based on dma mask */
-int __rte_experimental rte_eal_check_dma_mask(uint8_t maskbits);
+int __rte_experimental rte_mem_check_dma_mask(uint8_t maskbits);
/**
* Drivers based on uio will not load unless physical
}
if (mcfg->dma_maskbits) {
- if (rte_eal_check_dma_mask(mcfg->dma_maskbits)) {
+ if (rte_mem_check_dma_mask(mcfg->dma_maskbits)) {
RTE_LOG(ERR, EAL,
"%s(): couldn't allocate memory due to DMA mask\n",
__func__);
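The effect is that once a restrictive mask has been recorded in ``mcfg->dma_maskbits``, later dynamic allocations that would land outside the addressable range fail outright instead of handing the device memory it cannot reach.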
rte_devargs_parsef;
rte_devargs_remove;
rte_devargs_type_count;
- rte_eal_check_dma_mask;
rte_eal_cleanup;
rte_fbarray_attach;
rte_fbarray_destroy;
rte_malloc_heap_socket_is_external;
rte_mem_alloc_validator_register;
rte_mem_alloc_validator_unregister;
+ rte_mem_check_dma_mask;
rte_mem_event_callback_register;
rte_mem_event_callback_unregister;
rte_mem_iova2virt;