Add memory infrastructure for runtime Xen DOM0 support.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: Jijiang Liu <jijiang.liu@intel.com>
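Usage note (illustrative, not part of the diff): with the new runtime check,
code that previously had to be wrapped in #ifdef RTE_LIBRTE_XEN_DOM0 can pick
the Dom0 path at run time, the same pattern rte_eth_dma_zone_reserve() uses
below:

	if (is_xen_dom0_supported())
		mz = rte_memzone_reserve_bounded(name, size, socket_id,
						 0, align, RTE_PGSIZE_2M);
	else
		mz = rte_memzone_reserve_aligned(name, size, socket_id,
						 0, align);
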
extern "C" {
#endif
+#include <rte_common.h>
+
enum rte_page_sizes {
RTE_PGSIZE_4K = 1ULL << 12,
RTE_PGSIZE_64K = 1ULL << 16,
unsigned rte_memory_get_nrank(void);
#ifdef RTE_LIBRTE_XEN_DOM0
+
+/** Internal use only - whether Xen Dom0 memory mapping should be used */
+extern int is_xen_dom0_supported(void);
+
+/** Internal use only - physical to machine address mapping for Xen Dom0 */
+phys_addr_t rte_xen_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr);
+
/**
 * Return the machine address corresponding to the physical address phy_addr,
 * which belongs to memory segment memseg_id. When not running as Xen Dom0,
 * the physical address is returned unchanged.
 *
 * @return
 *   The machine (or physical) address, or an error value.
 */
-phys_addr_t rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr);
+static inline phys_addr_t
+rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
+{
+ if (is_xen_dom0_supported())
+ return rte_xen_mem_phy2mch(memseg_id, phy_addr);
+ else
+ return phy_addr;
+}
/**
 * Memory initialization to support applications running on Xen Dom0.
* negative: error
*/
int rte_xen_dom0_memory_attach(void);
+#else
+static inline int
+is_xen_dom0_supported(void)
+{
+ return 0;
+}
+
+static inline phys_addr_t
+rte_mem_phy2mch(uint32_t memseg_id __rte_unused, const phys_addr_t phy_addr)
+{
+ return phy_addr;
+}
#endif
+
#ifdef __cplusplus
}
#endif
#include "eal_filesystem.h"
#include "eal_hugepages.h"
+#ifdef RTE_LIBRTE_XEN_DOM0
+int
+is_xen_dom0_supported(void)
+{
+ return internal_config.xen_dom0_support;
+}
+#endif
+
/**
* @file
* Huge page mapping under linux
 * Calculate the MFN (machine frame number) in Xen Dom0 from the given physical address.
*/
phys_addr_t
-rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
+rte_xen_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
{
int mfn_id;
uint64_t mfn, mfn_offset;
return 0;
}
+const struct rte_memzone *
+rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
+ uint16_t queue_id, size_t size, unsigned align,
+ int socket_id)
+{
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ dev->driver->pci_drv.name, ring_name,
+ dev->data->port_id, queue_id);
+
+ mz = rte_memzone_lookup(z_name);
+ if (mz)
+ return mz;
+
+ if (is_xen_dom0_supported())
+ return rte_memzone_reserve_bounded(z_name, size, socket_id,
+ 0, align, RTE_PGSIZE_2M);
+ else
+ return rte_memzone_reserve_aligned(z_name, size, socket_id,
+ 0, align);
+}
+
int
rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
int epfd, int op, void *data)
extern void rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev);
+/**
+ * Create a memzone for HW rings.
+ * malloc cannot be used here because the physical address of the memory
+ * is needed. If the memzone already exists, this function returns a
+ * pointer to the existing one.
+ *
+ * @param eth_dev
+ *   The *eth_dev* pointer is the address of the *rte_eth_dev* structure.
+ * @param name
+ *   The name of the memory zone.
+ * @param queue_id
+ *   The index of the queue to add to the name.
+ * @param size
+ *   The size of the memory area.
+ * @param align
+ *   Alignment for the resulting memzone. Must be a power of 2.
+ * @param socket_id
+ *   The *socket_id* argument is the socket identifier in the case of NUMA.
+ * @return
+ *   The pointer to the reserved memzone, or NULL on error.
+ */
+const struct rte_memzone *
+rte_eth_dma_zone_reserve(const struct rte_eth_dev *eth_dev, const char *name,
+ uint16_t queue_id, size_t size,
+ unsigned align, int socket_id);
+
#ifdef __cplusplus
}
#endif
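
An illustrative sketch (hypothetical PMD code, not part of this patch) of how
a driver's RX queue setup might use the new helper; the function and variable
names here are made up:

	static int
	my_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
			  int socket_id, size_t ring_size)
	{
		const struct rte_memzone *rz;

		rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
					      ring_size, RTE_CACHE_LINE_SIZE,
					      socket_id);
		if (rz == NULL)
			return -ENOMEM;

		/* rz->addr is the ring's virtual address; drivers derive the
		 * DMA address from rz->phys_addr (translated through
		 * rte_mem_phy2mch() when running as Xen Dom0). */
		return 0;
	}
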
return usz;
}
+#ifndef RTE_LIBRTE_XEN_DOM0
+/* stub if DOM0 support not configured */
+struct rte_mempool *
+rte_dom0_mempool_create(const char *name __rte_unused,
+ unsigned n __rte_unused,
+ unsigned elt_size __rte_unused,
+ unsigned cache_size __rte_unused,
+ unsigned private_data_size __rte_unused,
+ rte_mempool_ctor_t *mp_init __rte_unused,
+ void *mp_init_arg __rte_unused,
+ rte_mempool_obj_ctor_t *obj_init __rte_unused,
+ void *obj_init_arg __rte_unused,
+ int socket_id __rte_unused,
+ unsigned flags __rte_unused)
+{
+ rte_errno = EINVAL;
+ return NULL;
+}
+#endif
+
/* create the mempool */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags)
{
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_dom0_mempool_create(name, n, elt_size,
- cache_size, private_data_size,
- mp_init, mp_init_arg,
- obj_init, obj_init_arg,
- socket_id, flags);
-#else
- return rte_mempool_xmem_create(name, n, elt_size,
- cache_size, private_data_size,
- mp_init, mp_init_arg,
- obj_init, obj_init_arg,
- socket_id, flags,
- NULL, NULL, MEMPOOL_PG_NUM_DEFAULT, MEMPOOL_PG_SHIFT_MAX);
-#endif
+ if (is_xen_dom0_supported())
+ return rte_dom0_mempool_create(name, n, elt_size,
+ cache_size, private_data_size,
+ mp_init, mp_init_arg,
+ obj_init, obj_init_arg,
+ socket_id, flags);
+ else
+ return rte_mempool_xmem_create(name, n, elt_size,
+ cache_size, private_data_size,
+ mp_init, mp_init_arg,
+ obj_init, obj_init_arg,
+ socket_id, flags,
+ NULL, NULL, MEMPOOL_PG_NUM_DEFAULT,
+ MEMPOOL_PG_SHIFT_MAX);
}
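
For reference, callers of rte_mempool_create() are unchanged by this patch;
the Xen Dom0 path is now selected at run time. A minimal, illustrative call
(NB_ELEM and ELT_SIZE are hypothetical constants):

	struct rte_mempool *mp;

	mp = rte_mempool_create("test_pool", NB_ELEM, ELT_SIZE,
				32, 0,
				NULL, NULL, NULL, NULL,
				rte_socket_id(), 0);
	if (mp == NULL)
		rte_panic("cannot create mempool\n");
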
/*
int socket_id, unsigned flags, void *vaddr,
const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
-#ifdef RTE_LIBRTE_XEN_DOM0
/**
* Create a new mempool named *name* in memory on Xen Dom0.
*
rte_mempool_ctor_t *mp_init, void *mp_init_arg,
rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags);
-#endif
+
/**
* Dump the status of the mempool to the console.