struct ena_com_io_sq *io_sq)
{
size_t size;
+ int dev_node;
- ENA_TOUCH(ctx);
size = io_sq->desc_entry_size * io_sq->q_depth;
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
- size,
- io_sq->desc_addr.virt_addr,
- io_sq->desc_addr.phys_addr,
- io_sq->desc_addr.mem_handle);
- else
- io_sq->desc_addr.virt_addr =
- ENA_MEM_ALLOC(ena_dev->dmadev, size);
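+ /*
+  * Prefer placing the SQ descriptor ring on the requested NUMA node;
+  * if the node-local reservation fails, fall back to an allocation
+  * on any node.
+  */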
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
+ ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
+ size,
+ io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr,
+ ctx->numa_node,
+ dev_node);
+ if (!io_sq->desc_addr.virt_addr)
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ size,
+ io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr,
+ io_sq->desc_addr.mem_handle);
+ } else {
+ ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
+ size,
+ io_sq->desc_addr.virt_addr,
+ ctx->numa_node,
+ dev_node);
+ if (!io_sq->desc_addr.virt_addr)
+ io_sq->desc_addr.virt_addr =
+ ENA_MEM_ALLOC(ena_dev->dmadev, size);
+ }
if (!io_sq->desc_addr.virt_addr) {
ena_trc_err("memory allocation failed");
struct ena_com_io_cq *io_cq)
{
size_t size;
+ int dev_node;
- ENA_TOUCH(ctx);
memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
- size,
- io_cq->cdesc_addr.virt_addr,
- io_cq->cdesc_addr.phys_addr,
- io_cq->cdesc_addr.mem_handle);
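+ /*
+  * Same NUMA-first policy for the completion descriptor ring: try the
+  * requested node first, then fall back to any node.
+  */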
+ ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
+ size,
+ io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr,
+ ctx->numa_node,
+ dev_node);
+ if (!io_cq->cdesc_addr.virt_addr)
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ size,
+ io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr,
+ io_cq->cdesc_addr.mem_handle);
if (!io_cq->cdesc_addr.virt_addr) {
ena_trc_err("memory allocation failed");
ENA_TOUCH(dmadev); \
rte_free(virt); })
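+/*
+ * Node-aware variant of ENA_MEM_ALLOC_COHERENT: reserve the memzone on
+ * the given NUMA node and leave virt NULL on failure so that callers
+ * can fall back to a node-agnostic allocation.
+ */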
+#define ENA_MEM_ALLOC_COHERENT_NODE(dmadev, size, virt, phys, node, dev_node) \
+ do { \
+ const struct rte_memzone *mz; \
+ char z_name[RTE_MEMZONE_NAMESIZE]; \
+ ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \
+ snprintf(z_name, sizeof(z_name), \
+ "ena_alloc_%d", ena_alloc_cnt++); \
+ mz = rte_memzone_reserve(z_name, size, node, 0); \
+ virt = mz ? mz->addr : NULL; \
+ phys = mz ? mz->phys_addr : 0; \
+ } while (0)
+
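+/*
+ * Node-aware variant of ENA_MEM_ALLOC: same reservation scheme, but
+ * only the virtual address is returned to the caller.
+ */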
+#define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) \
+ do { \
+ const struct rte_memzone *mz; \
+ char z_name[RTE_MEMZONE_NAMESIZE]; \
+ ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \
+ snprintf(z_name, sizeof(z_name), \
+ "ena_alloc_%d", ena_alloc_cnt++); \
+ mz = rte_memzone_reserve(z_name, size, node, 0); \
+ virt = mz ? mz->addr : NULL; \
+ } while (0)
+
#define ENA_MEM_ALLOC(dmadev, size) rte_zmalloc(NULL, size, 1)
#define ENA_MEM_FREE(dmadev, ptr) ({ENA_TOUCH(dmadev); rte_free(ptr); })
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_version.h>
+#include <rte_eal_memconfig.h>
#include "ena_ethdev.h"
#include "ena_logs.h"
.reta_query = ena_rss_reta_query,
};
+#define NUMA_NO_NODE SOCKET_ID_ANY
+
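+/*
+ * Best-effort mapping from a queue index to a NUMA node: use the
+ * socket recorded for the matching memzone as a hint, or return
+ * NUMA_NO_NODE (any socket) when the index is out of range.
+ */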
+static inline int ena_cpu_to_node(int cpu)
+{
+ struct rte_config *config = rte_eal_get_configuration();
+
+ if (likely(cpu < RTE_MAX_MEMZONE))
+ return config->mem_config->memzone[cpu].socket_id;
+
+ return NUMA_NO_NODE;
+}
+
static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
struct ena_com_rx_ctx *ena_rx_ctx)
{
ctx.msix_vector = -1; /* admin interrupts not used */
ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
ctx.queue_size = adapter->tx_ring_size;
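+ /* Prefer ring allocations on the NUMA node local to this queue. */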
+ ctx.numa_node = ena_cpu_to_node(queue_idx);
rc = ena_com_create_io_queue(ena_dev, &ctx);
if (rc) {
ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
ctx.msix_vector = -1; /* admin interrupts not used */
ctx.queue_size = adapter->rx_ring_size;
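+ /* Likewise for Rx: prefer ring allocations on the local NUMA node. */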
+ ctx.numa_node = ena_cpu_to_node(queue_idx);
rc = ena_com_create_io_queue(ena_dev, &ctx);
if (rc)