X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eal%2Fcommon%2Finclude%2Frte_memory.h;h=f8dbece0045ca6d7b1cb1374eebbb6387805ca59;hb=6a34f91690d0;hp=13138121d39fd2655267cc77740f602c419e185b;hpb=591a9d7985c1230652d9f7ea1f9221e8c66ec188;p=dpdk.git

diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index 13138121d3..f8dbece004 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -1,13 +1,13 @@
 /*-
  *   BSD LICENSE
- *
+ *
  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- *
+ *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
  *   are met:
- *
+ *
  *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above copyright
@@ -17,7 +17,7 @@
  *     * Neither the name of Intel Corporation nor the names of its
  *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- *
+ *
  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -42,6 +42,7 @@
 
 #include <stdint.h>
 #include <stddef.h>
+#include <stdio.h>
 
 #ifdef RTE_EXEC_ENV_LINUXAPP
 #include <exec-env/rte_dom0_common.h>
@@ -51,26 +52,48 @@
 extern "C" {
 #endif
 
+#include <rte_common.h>
+
 enum rte_page_sizes {
-	RTE_PGSIZE_4K = 1 << 12,
-	RTE_PGSIZE_2M = RTE_PGSIZE_4K << 9,
-	RTE_PGSIZE_1G = RTE_PGSIZE_2M <<9
+	RTE_PGSIZE_4K    = 1ULL << 12,
+	RTE_PGSIZE_64K   = 1ULL << 16,
+	RTE_PGSIZE_256K  = 1ULL << 18,
+	RTE_PGSIZE_2M    = 1ULL << 21,
+	RTE_PGSIZE_16M   = 1ULL << 24,
+	RTE_PGSIZE_256M  = 1ULL << 28,
+	RTE_PGSIZE_512M  = 1ULL << 29,
+	RTE_PGSIZE_1G    = 1ULL << 30,
+	RTE_PGSIZE_4G    = 1ULL << 32,
+	RTE_PGSIZE_16G   = 1ULL << 34,
 };
 
 #define SOCKET_ID_ANY -1                    /**< Any NUMA socket. */
-#ifndef CACHE_LINE_SIZE
-#define CACHE_LINE_SIZE 64                  /**< Cache line size. */
-#endif
-#define CACHE_LINE_MASK (CACHE_LINE_SIZE-1) /**< Cache line mask. */
+#define RTE_CACHE_LINE_MASK (RTE_CACHE_LINE_SIZE-1) /**< Cache line mask. */
 
-#define CACHE_LINE_ROUNDUP(size) \
-	(CACHE_LINE_SIZE * ((size + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE))
+#define RTE_CACHE_LINE_ROUNDUP(size) \
+	(RTE_CACHE_LINE_SIZE * ((size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE))
 /**< Return the first cache-aligned value greater or equal to size. */
 
+/**< Cache line size in terms of log2 */
+#if RTE_CACHE_LINE_SIZE == 64
+#define RTE_CACHE_LINE_SIZE_LOG2 6
+#elif RTE_CACHE_LINE_SIZE == 128
+#define RTE_CACHE_LINE_SIZE_LOG2 7
+#else
+#error "Unsupported cache line size"
+#endif
+
+#define RTE_CACHE_LINE_MIN_SIZE 64	/**< Minimum Cache line size. */
+
 /**
  * Force alignment to cache line.
  */
-#define __rte_cache_aligned __attribute__((__aligned__(CACHE_LINE_SIZE)))
+#define __rte_cache_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)
+
+/**
+ * Force minimum cache line alignment.
+ */
+#define __rte_cache_min_aligned __rte_aligned(RTE_CACHE_LINE_MIN_SIZE)
 
 typedef uint64_t phys_addr_t; /**< Physical address definition. */
 #define RTE_BAD_PHYS_ADDR ((phys_addr_t)-1)
@@ -88,15 +111,15 @@ struct rte_memseg {
 	phys_addr_t ioremap_addr; /**< Real physical address inside the VM */
 #endif
 	size_t len;               /**< Length of the segment. */
-	size_t hugepage_sz;       /**< The pagesize of underlying memory */
+	uint64_t hugepage_sz;       /**< The pagesize of underlying memory */
 	int32_t socket_id;          /**< NUMA socket ID. */
 	uint32_t nchannel;          /**< Number of channels. */
 	uint32_t nrank;             /**< Number of ranks. */
 #ifdef RTE_LIBRTE_XEN_DOM0
 	 /**< store segment MFNs */
-	uint64_t mfn[DOM0_NUM_MEMBLOCK];
+	uint64_t mfn[DOM0_NUM_MEMBLOCK];
 #endif
-} __attribute__((__packed__));
+} __rte_packed;
 
 /**
  * Lock page in physical memory and prevent from swapping.
@@ -172,10 +195,17 @@ unsigned rte_memory_get_nchannel(void);
 unsigned rte_memory_get_nrank(void);
 
 #ifdef RTE_LIBRTE_XEN_DOM0
+
+/**< Internal use only - should DOM0 memory mapping be used */
+int rte_xen_dom0_supported(void);
+
+/**< Internal use only - phys to virt mapping for xen */
+phys_addr_t rte_xen_mem_phy2mch(uint32_t, const phys_addr_t);
+
 /**
  * Return the physical address of elt, which is an element of the pool mp.
  *
- * @param memseg_id
+ * @param memseg_id
  *   The mempool is from which memory segment.
  * @param phy_addr
  *   physical address of elt.
@@ -183,30 +213,49 @@ unsigned rte_memory_get_nrank(void);
  * @return
  *   The physical address or error.
  */
-phys_addr_t rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr);
+static inline phys_addr_t
+rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
+{
+	if (rte_xen_dom0_supported())
+		return rte_xen_mem_phy2mch(memseg_id, phy_addr);
+	else
+		return phy_addr;
+}
 
 /**
- * Memory init for supporting application running on Xen domain0.
- *
- * @param void
- *
- * @return
+ * Memory init for supporting application running on Xen domain0.
+ *
+ * @param void
+ *
+ * @return
  *       0: successfully
- *       negative: error
- */
+ *       negative: error
+ */
 int rte_xen_dom0_memory_init(void);
 
 /**
- * Attach to memory setments of primary process on Xen domain0.
- *
- * @param void
- *
- * @return
+ * Attach to memory setments of primary process on Xen domain0.
+ *
+ * @param void
+ *
+ * @return
  *       0: successfully
  *       negative: error
  */
 int rte_xen_dom0_memory_attach(void);
+#else
+static inline int rte_xen_dom0_supported(void)
+{
+	return 0;
+}
+
+static inline phys_addr_t
+rte_mem_phy2mch(uint32_t memseg_id __rte_unused, const phys_addr_t phy_addr)
+{
+	return phy_addr;
+}
 #endif
+
 #ifdef __cplusplus
 }
 #endif
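
For illustration, a minimal sketch of how the renamed cache-line helpers and the new inline rte_mem_phy2mch() wrapper from this patch might be used by calling code. It assumes a DPDK build environment that provides the modified rte_memory.h; the struct and function names below (percore_stats, dump_memseg) are hypothetical examples, not part of the patch.

#include <inttypes.h>
#include <stdio.h>

#include <rte_memory.h>   /* the header modified above */

/* Hypothetical per-core counters, padded out to a full cache line. */
struct percore_stats {
	uint64_t rx_pkts;
	uint64_t tx_pkts;
} __rte_cache_aligned;

/* Hypothetical helper: print a few properties of one memory segment. */
static void
dump_memseg(const struct rte_memseg *ms)
{
	/* Round an arbitrary size up to a cache-line multiple. */
	size_t sz = RTE_CACHE_LINE_ROUNDUP(100);

	/*
	 * rte_mem_phy2mch() is now inline: on Xen dom0 it translates the
	 * physical address to a machine address via rte_xen_mem_phy2mch(),
	 * otherwise it returns the address unchanged. The memseg index 0
	 * used here is illustrative only.
	 */
	phys_addr_t mch = rte_mem_phy2mch(0, ms->phys_addr);

	printf("len=%zu hugepage_sz=%" PRIu64 " addr=0x%" PRIx64 " rounded=%zu\n",
	       ms->len, ms->hugepage_sz, (uint64_t)mch, sz);
}

On builds without RTE_LIBRTE_XEN_DOM0 the same call compiles down to a plain return of the physical address, so callers no longer need their own #ifdef around the translation.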