diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index 7f21244ac8..4aa5d1f775 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -42,6 +42,9 @@
 #include <stdint.h>
 #include <stddef.h>
+#include <stdio.h>
+
+#include <rte_config.h>
 
 #ifdef RTE_EXEC_ENV_LINUXAPP
 #include <exec-env/rte_dom0_common.h>
 #endif
@@ -51,26 +54,49 @@
 extern "C" {
 #endif
 
+#include <rte_common.h>
+
+__extension__
 enum rte_page_sizes {
-	RTE_PGSIZE_4K = 1 << 12,
-	RTE_PGSIZE_2M = RTE_PGSIZE_4K << 9,
-	RTE_PGSIZE_1G = RTE_PGSIZE_2M <<9
+	RTE_PGSIZE_4K   = 1ULL << 12,
+	RTE_PGSIZE_64K  = 1ULL << 16,
+	RTE_PGSIZE_256K = 1ULL << 18,
+	RTE_PGSIZE_2M   = 1ULL << 21,
+	RTE_PGSIZE_16M  = 1ULL << 24,
+	RTE_PGSIZE_256M = 1ULL << 28,
+	RTE_PGSIZE_512M = 1ULL << 29,
+	RTE_PGSIZE_1G   = 1ULL << 30,
+	RTE_PGSIZE_4G   = 1ULL << 32,
+	RTE_PGSIZE_16G  = 1ULL << 34,
 };
 
 #define SOCKET_ID_ANY -1                    /**< Any NUMA socket. */
-#ifndef CACHE_LINE_SIZE
-#define CACHE_LINE_SIZE 64                  /**< Cache line size. */
-#endif
-#define CACHE_LINE_MASK (CACHE_LINE_SIZE-1) /**< Cache line mask. */
+#define RTE_CACHE_LINE_MASK (RTE_CACHE_LINE_SIZE-1) /**< Cache line mask. */
 
-#define CACHE_LINE_ROUNDUP(size) \
-	(CACHE_LINE_SIZE * ((size + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE))
+#define RTE_CACHE_LINE_ROUNDUP(size) \
+	(RTE_CACHE_LINE_SIZE * ((size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE))
 /**< Return the first cache-aligned value greater or equal to size. */
 
+/**< Cache line size in terms of log2 */
+#if RTE_CACHE_LINE_SIZE == 64
+#define RTE_CACHE_LINE_SIZE_LOG2 6
+#elif RTE_CACHE_LINE_SIZE == 128
+#define RTE_CACHE_LINE_SIZE_LOG2 7
+#else
+#error "Unsupported cache line size"
+#endif
+
+#define RTE_CACHE_LINE_MIN_SIZE 64	/**< Minimum Cache line size. */
+
 /**
  * Force alignment to cache line.
  */
-#define __rte_cache_aligned __attribute__((__aligned__(CACHE_LINE_SIZE)))
+#define __rte_cache_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)
+
+/**
+ * Force minimum cache line alignment.
+ */
+#define __rte_cache_min_aligned __rte_aligned(RTE_CACHE_LINE_MIN_SIZE)
 
 typedef uint64_t phys_addr_t; /**< Physical address definition. */
 #define RTE_BAD_PHYS_ADDR ((phys_addr_t)-1)
@@ -80,15 +106,13 @@ typedef uint64_t phys_addr_t; /**< Physical address definition. */
  */
 struct rte_memseg {
 	phys_addr_t phys_addr;      /**< Start physical address. */
+	RTE_STD_C11
 	union {
 		void *addr;         /**< Start virtual address. */
 		uint64_t addr_64;   /**< Makes sure addr is always 64 bits */
 	};
-#ifdef RTE_LIBRTE_IVSHMEM
-	phys_addr_t ioremap_addr;   /**< Real physical address inside the VM */
-#endif
 	size_t len;                 /**< Length of the segment. */
-	size_t hugepage_sz;         /**< The pagesize of underlying memory */
+	uint64_t hugepage_sz;       /**< The pagesize of underlying memory */
 	int32_t socket_id;          /**< NUMA socket ID. */
 	uint32_t nchannel;          /**< Number of channels. */
 	uint32_t nrank;             /**< Number of ranks. */
@@ -96,7 +120,7 @@ struct rte_memseg {
 	/**< store segment MFNs */
 	uint64_t mfn[DOM0_NUM_MEMBLOCK];
 #endif
-} __attribute__((__packed__));
+} __rte_packed;
 
 /**
  * Lock page in physical memory and prevent from swapping.
@@ -138,7 +162,7 @@ phys_addr_t rte_mem_virt2phy(const void *virt);
 const struct rte_memseg *rte_eal_get_physmem_layout(void);
 
 /**
- * Dump the physical memory layout to the console.
+ * Dump the physical memory layout to a file.
  *
  * @param f
  *   A pointer to a file for output
@@ -172,18 +196,33 @@ unsigned rte_memory_get_nchannel(void);
 unsigned rte_memory_get_nrank(void);
 
 #ifdef RTE_LIBRTE_XEN_DOM0
+
+/**< Internal use only - should DOM0 memory mapping be used */
+int rte_xen_dom0_supported(void);
+
+/**< Internal use only - phys to virt mapping for xen */
+phys_addr_t rte_xen_mem_phy2mch(int32_t, const phys_addr_t);
+
 /**
  * Return the physical address of elt, which is an element of the pool mp.
  *
  * @param memseg_id
- *   The mempool is from which memory segment.
+ *   Identifier of the memory segment owning the physical address. If
+ *   set to -1, find it automatically.
  * @param phy_addr
  *   physical address of elt.
  *
  * @return
- *   The physical address or error.
+ *   The physical address or RTE_BAD_PHYS_ADDR on error.
  */
-phys_addr_t rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr);
+static inline phys_addr_t
+rte_mem_phy2mch(int32_t memseg_id, const phys_addr_t phy_addr)
+{
+	if (rte_xen_dom0_supported())
+		return rte_xen_mem_phy2mch(memseg_id, phy_addr);
+	else
+		return phy_addr;
+}
 
 /**
  * Memory init for supporting application running on Xen domain0.
@@ -192,7 +231,7 @@ phys_addr_t rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr);
  *
  * @return
  *       0: successfully
- *       negative: error 
+ *       negative: error
  */
 int rte_xen_dom0_memory_init(void);
 
@@ -206,7 +245,19 @@ int rte_xen_dom0_memory_init(void);
  *	 negative: error
  */
 int rte_xen_dom0_memory_attach(void);
+#else
+static inline int rte_xen_dom0_supported(void)
+{
+	return 0;
+}
+
+static inline phys_addr_t
+rte_mem_phy2mch(int32_t memseg_id __rte_unused, const phys_addr_t phy_addr)
+{
+	return phy_addr;
+}
 #endif
+
 #ifdef __cplusplus
 }
 #endif
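
Usage note (not part of the patch): the CACHE_LINE_* macros above are renamed with an RTE_ prefix but keep their old semantics. The following standalone sketch mirrors what RTE_CACHE_LINE_ROUNDUP and __rte_cache_aligned do; CACHE_LINE is an illustrative stand-in for RTE_CACHE_LINE_SIZE so the example compiles without the DPDK headers.

#include <stdio.h>
#include <stddef.h>

#define CACHE_LINE 64	/* stand-in for RTE_CACHE_LINE_SIZE */

/* Same arithmetic as RTE_CACHE_LINE_ROUNDUP: round size up to the next
 * multiple of the cache line size. */
#define CACHE_LINE_ROUNDUP(size) \
	(CACHE_LINE * (((size) + CACHE_LINE - 1) / CACHE_LINE))

/* Equivalent of tagging a type __rte_cache_aligned: the struct is padded
 * and aligned to a cache line, so adjacent array elements never share one. */
struct counters {
	unsigned long rx;
	unsigned long tx;
} __attribute__((__aligned__(CACHE_LINE)));

int main(void)
{
	printf("roundup(100) = %zu\n", (size_t)CACHE_LINE_ROUNDUP(100)); /* 128 */
	printf("sizeof(struct counters) = %zu\n", sizeof(struct counters)); /* 64 */
	return 0;
}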
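
The rte_memseg changes (RTE_STD_C11 union, 64-bit hugepage_sz) do not alter how applications walk the layout. Below is a minimal sketch of iterating the segments returned by rte_eal_get_physmem_layout(), assuming an initialized EAL (rte_eal_init() already called) and DPDK headers on the include path; dump_segments() is a hypothetical helper, and the loop relies on the convention that the fixed-size array (RTE_MAX_MEMSEG entries) ends at the first entry whose addr is NULL.

#include <stdio.h>
#include <inttypes.h>

#include <rte_config.h>
#include <rte_memory.h>

/* Hypothetical helper: print each hugepage-backed memory segment. */
static void
dump_segments(void)
{
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
	unsigned int i;

	for (i = 0; i < RTE_MAX_MEMSEG && ms[i].addr != NULL; i++)
		printf("seg %u: virt=%p phys=0x%" PRIx64 " len=%zu "
		       "hugepage_sz=%" PRIu64 " socket=%" PRId32 "\n",
		       i, ms[i].addr, (uint64_t)ms[i].phys_addr,
		       ms[i].len, ms[i].hugepage_sz, ms[i].socket_id);
}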
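
The inline rte_mem_phy2mch() introduced above means callers no longer guard on RTE_LIBRTE_XEN_DOM0 themselves: outside Xen dom0 it is an identity function. A sketch of the intended call pattern, under the same assumptions as the previous example; buf_machine_addr() is a hypothetical helper, and buf must live in DPDK-managed (hugepage) memory for rte_mem_virt2phy() to succeed.

#include <rte_memory.h>

/* Hypothetical helper: translate a virtual address in DPDK-managed memory
 * to the address a device must use for DMA. */
static phys_addr_t
buf_machine_addr(const void *buf)
{
	phys_addr_t phys = rte_mem_virt2phy(buf);

	if (phys == RTE_BAD_PHYS_ADDR)
		return RTE_BAD_PHYS_ADDR;	/* not DPDK-managed memory */

	/* Outside Xen dom0 this returns phys unchanged; under dom0 it maps
	 * the guest physical address to a machine address. memseg_id -1
	 * asks the library to locate the owning segment itself. */
	return rte_mem_phy2mch(-1, phys);
}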