X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eal%2Fcommon%2Finclude%2Frte_memory.h;h=4aa5d1f77566cdcb1c3d3d6d619b88e6b5e1c0c6;hb=c23a1a300081d5e2b92d901ef0649c437b00f9e1;hp=b44db0032a7524eab565b1cf2cc769f173e9574d;hpb=e9d48c0072d36eb6423b45fba4ec49d0def6c36f;p=dpdk.git

diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index b44db0032a..4aa5d1f775 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -1,13 +1,13 @@
 /*-
  *   BSD LICENSE
- * 
+ *
  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- * 
+ *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
  *   are met:
- * 
+ *
  *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above copyright
@@ -17,7 +17,7 @@
  *     * Neither the name of Intel Corporation nor the names of its
  *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- * 
+ *
  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -42,48 +42,107 @@
 #include <stdint.h>
 #include <stddef.h>
+#include <stdio.h>
+
+#include <rte_config.h>
+
+#ifdef RTE_EXEC_ENV_LINUXAPP
+#include <exec-env/rte_dom0_common.h>
+#endif
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
+#include <rte_common.h>
+
+__extension__
 enum rte_page_sizes {
-	RTE_PGSIZE_4K = 1 << 12,
-	RTE_PGSIZE_2M = RTE_PGSIZE_4K << 9,
-	RTE_PGSIZE_1G = RTE_PGSIZE_2M <<9
+	RTE_PGSIZE_4K    = 1ULL << 12,
+	RTE_PGSIZE_64K   = 1ULL << 16,
+	RTE_PGSIZE_256K  = 1ULL << 18,
+	RTE_PGSIZE_2M    = 1ULL << 21,
+	RTE_PGSIZE_16M   = 1ULL << 24,
+	RTE_PGSIZE_256M  = 1ULL << 28,
+	RTE_PGSIZE_512M  = 1ULL << 29,
+	RTE_PGSIZE_1G    = 1ULL << 30,
+	RTE_PGSIZE_4G    = 1ULL << 32,
+	RTE_PGSIZE_16G   = 1ULL << 34,
 };
 
 #define SOCKET_ID_ANY -1                    /**< Any NUMA socket. */
-#define CACHE_LINE_SIZE 64                  /**< Cache line size. */
-#define CACHE_LINE_MASK (CACHE_LINE_SIZE-1) /**< Cache line mask. */
+#define RTE_CACHE_LINE_MASK (RTE_CACHE_LINE_SIZE-1) /**< Cache line mask. */
 
-#define CACHE_LINE_ROUNDUP(size) \
-	(CACHE_LINE_SIZE * ((size + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE))
+#define RTE_CACHE_LINE_ROUNDUP(size) \
+	(RTE_CACHE_LINE_SIZE * ((size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE))
 /**< Return the first cache-aligned value greater or equal to size. */
 
+/**< Cache line size in terms of log2 */
+#if RTE_CACHE_LINE_SIZE == 64
+#define RTE_CACHE_LINE_SIZE_LOG2 6
+#elif RTE_CACHE_LINE_SIZE == 128
+#define RTE_CACHE_LINE_SIZE_LOG2 7
+#else
+#error "Unsupported cache line size"
+#endif
+
+#define RTE_CACHE_LINE_MIN_SIZE 64 /**< Minimum Cache line size. */
+
 /**
  * Force alignment to cache line.
  */
-#define __rte_cache_aligned __attribute__((__aligned__(CACHE_LINE_SIZE)))
+#define __rte_cache_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)
+
+/**
+ * Force minimum cache line alignment.
+ */
+#define __rte_cache_min_aligned __rte_aligned(RTE_CACHE_LINE_MIN_SIZE)
 
 typedef uint64_t phys_addr_t; /**< Physical address definition. */
+#define RTE_BAD_PHYS_ADDR ((phys_addr_t)-1)
 
 /**
  * Physical memory segment descriptor.
  */
 struct rte_memseg {
 	phys_addr_t phys_addr;      /**< Start physical address. */
+	RTE_STD_C11
 	union {
 		void *addr;         /**< Start virtual address. */
 		uint64_t addr_64;   /**< Makes sure addr is always 64 bits */
 	};
 	size_t len;               /**< Length of the segment. */
-	size_t hugepage_sz;       /**< The pagesize of underlying memory */
+	uint64_t hugepage_sz;       /**< The pagesize of underlying memory */
 	int32_t socket_id;          /**< NUMA socket ID. */
 	uint32_t nchannel;          /**< Number of channels. */
 	uint32_t nrank;             /**< Number of ranks. */
-} __attribute__((__packed__));
+#ifdef RTE_LIBRTE_XEN_DOM0
+	 /**< store segment MFNs */
+	uint64_t mfn[DOM0_NUM_MEMBLOCK];
+#endif
+} __rte_packed;
 
 /**
+ * Lock page in physical memory and prevent from swapping.
+ *
+ * @param virt
+ *   The virtual address.
+ * @return
+ *   0 on success, negative on error.
+ */
+int rte_mem_lock_page(const void *virt);
+
+/**
+ * Get physical address of any mapped virtual address in the current process.
+ * It is found by browsing the /proc/self/pagemap special file.
+ * The page must be locked.
+ *
+ * @param virt
+ *   The virtual address.
+ * @return
+ *   The physical address or RTE_BAD_PHYS_ADDR on error.
+ */
+phys_addr_t rte_mem_virt2phy(const void *virt);
+
+/**
  * Get the layout of the available physical memory.
@@ -103,9 +162,12 @@ struct rte_memseg {
 const struct rte_memseg *rte_eal_get_physmem_layout(void);
 
 /**
- * Dump the physical memory layout to the console.
+ * Dump the physical memory layout to a file.
+ *
+ * @param f
+ *   A pointer to a file for output
  */
-void rte_dump_physmem_layout(void);
+void rte_dump_physmem_layout(FILE *f);
 
 /**
  * Get the total amount of available physical memory.
@@ -133,6 +195,69 @@ unsigned rte_memory_get_nchannel(void);
  */
 unsigned rte_memory_get_nrank(void);
 
+#ifdef RTE_LIBRTE_XEN_DOM0
+
+/**< Internal use only - should DOM0 memory mapping be used */
+int rte_xen_dom0_supported(void);
+
+/**< Internal use only - phys to virt mapping for xen */
+phys_addr_t rte_xen_mem_phy2mch(int32_t, const phys_addr_t);
+
+/**
+ * Return the physical address of elt, which is an element of the pool mp.
+ *
+ * @param memseg_id
+ *   Identifier of the memory segment owning the physical address. If
+ *   set to -1, find it automatically.
+ * @param phy_addr
+ *   physical address of elt.
+ *
+ * @return
+ *   The physical address or RTE_BAD_PHYS_ADDR on error.
+ */
+static inline phys_addr_t
+rte_mem_phy2mch(int32_t memseg_id, const phys_addr_t phy_addr)
+{
+	if (rte_xen_dom0_supported())
+		return rte_xen_mem_phy2mch(memseg_id, phy_addr);
+	else
+		return phy_addr;
+}
+
+/**
+ * Memory init for supporting application running on Xen domain0.
+ *
+ * @param void
+ *
+ * @return
+ *       0: successfully
+ *       negative: error
+ */
+int rte_xen_dom0_memory_init(void);
+
+/**
+ * Attach to memory setments of primary process on Xen domain0.
+ *
+ * @param void
+ *
+ * @return
+ *       0: successfully
+ *       negative: error
+ */
+int rte_xen_dom0_memory_attach(void);
+#else
+static inline int rte_xen_dom0_supported(void)
+{
+	return 0;
+}
+
+static inline phys_addr_t
+rte_mem_phy2mch(int32_t memseg_id __rte_unused, const phys_addr_t phy_addr)
+{
+	return phy_addr;
+}
+#endif
+
 #ifdef __cplusplus
 }
 #endif
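
For context, the sketch below shows how the APIs introduced by this patch -- rte_mem_lock_page(), rte_mem_virt2phy(), RTE_BAD_PHYS_ADDR and the new FILE * parameter of rte_dump_physmem_layout() -- might be exercised from application code. It is a minimal illustration, not part of the patch: the inclusion of rte_eal.h, the rte_eal_init() call and the malloc'd buffer are assumptions about the surrounding application.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

#include <rte_eal.h>
#include <rte_memory.h>

int main(int argc, char **argv)
{
	/* The EAL must be initialized before the memory APIs can be used. */
	if (rte_eal_init(argc, argv) < 0)
		return EXIT_FAILURE;

	/* New in this patch: the dump goes to a caller-supplied FILE *. */
	rte_dump_physmem_layout(stdout);

	void *buf = malloc(4096);
	if (buf == NULL)
		return EXIT_FAILURE;

	/* Lock the page so it stays resident ... */
	if (rte_mem_lock_page(buf) < 0)
		return EXIT_FAILURE;

	/* ... then resolve its physical address via /proc/self/pagemap. */
	phys_addr_t pa = rte_mem_virt2phy(buf);
	if (pa == RTE_BAD_PHYS_ADDR)
		fprintf(stderr, "virt2phy failed for %p\n", buf);
	else
		printf("virt %p -> phys 0x%" PRIx64 "\n", buf, pa);

	free(buf);
	return EXIT_SUCCESS;
}

Note that on newer kernels unprivileged reads of /proc/self/pagemap may hide the physical frame number, so the translation can fail without sufficient privileges; locking the page first, as the new doc comment requires, only guarantees that the page is resident when the lookup happens.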
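
Similarly, a small sketch of how the renamed cache-line helpers from this patch (RTE_CACHE_LINE_MASK, RTE_CACHE_LINE_ROUNDUP, __rte_cache_aligned) are typically used; the structure and function names below are illustrative only:

#include <stdint.h>
#include <stddef.h>

#include <rte_memory.h>

/* Pad per-core counters to a full cache line to avoid false sharing. */
struct percore_counters {
	uint64_t rx_pkts;
	uint64_t tx_pkts;
} __rte_cache_aligned;

/* Round an arbitrary object size up to a cache-line multiple,
 * e.g. when sizing an array of per-core slots. */
static inline size_t
slot_size(size_t obj_len)
{
	return RTE_CACHE_LINE_ROUNDUP(obj_len);
}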