return rc;
}
-static int bnxt_alloc_ctx_mem_buf(char *type, size_t size,
+static int bnxt_alloc_ctx_mem_buf(struct bnxt *bp, char *type, size_t size,
struct bnxt_ctx_mem_buf_info *ctx)
{
if (!ctx)
return -EINVAL;
- ctx->va = rte_zmalloc(type, size, 0);
+ ctx->va = rte_zmalloc_socket(type, size, 0,
+ bp->eth_dev->device->numa_node);
if (ctx->va == NULL)
return -ENOMEM;
rte_mem_lock_page(ctx->va);
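For context, the allocation pattern this hunk switches to can be sketched on its own as below. The helper and struct names here (alloc_on_dev_node, struct buf_info) are hypothetical stand-ins rather than the driver's own types; only rte_zmalloc_socket() and rte_mem_lock_page() are real DPDK calls.

#include <errno.h>
#include <rte_malloc.h>
#include <rte_memory.h>

struct buf_info {			/* stand-in for bnxt_ctx_mem_buf_info */
	void *va;
};

static int
alloc_on_dev_node(const char *type, size_t size, int numa_node,
		  struct buf_info *ctx)
{
	if (ctx == NULL)
		return -EINVAL;
	/* Allocate from the heap on the device's NUMA node instead of
	 * letting rte_zmalloc() pick any socket. */
	ctx->va = rte_zmalloc_socket(type, size, 0, numa_node);
	if (ctx->va == NULL)
		return -ENOMEM;
	rte_mem_lock_page(ctx->va);	/* keep the page resident for DMA */
	return 0;
}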
sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
/* 4 bytes for each counter-id */
- rc = bnxt_alloc_ctx_mem_buf(type,
+ rc = bnxt_alloc_ctx_mem_buf(bp, type,
max_fc * 4,
&bp->flow_stat->rx_fc_in_tbl);
if (rc)
sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
- rc = bnxt_alloc_ctx_mem_buf(type,
+ rc = bnxt_alloc_ctx_mem_buf(bp, type,
max_fc * 16,
&bp->flow_stat->rx_fc_out_tbl);
if (rc)
sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
/* 4 bytes for each counter-id */
- rc = bnxt_alloc_ctx_mem_buf(type,
+ rc = bnxt_alloc_ctx_mem_buf(bp, type,
max_fc * 4,
&bp->flow_stat->tx_fc_in_tbl);
if (rc)
sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
- rc = bnxt_alloc_ctx_mem_buf(type,
+ rc = bnxt_alloc_ctx_mem_buf(bp, type,
max_fc * 16,
&bp->flow_stat->tx_fc_out_tbl);
if (rc)
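The buffer sizes in these four hunks follow directly from the in-line comments; written out as arithmetic (max_fc being the maximum number of flow counters):

/* Sizing sketch: the "in" tables hold one 32-bit counter-id per flow;
 * the "out" tables hold an 8-byte packet count plus an 8-byte byte count. */
size_t fc_in_tbl_size  = (size_t)max_fc * 4;	/* 4 B per counter-id   */
size_t fc_out_tbl_size = (size_t)max_fc * 16;	/* 8 B pkts + 8 B bytes */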
if (!mz) {
mz = rte_memzone_reserve_aligned(mz_name,
rmem->nr_pages * 8,
- SOCKET_ID_ANY,
+ bp->eth_dev->device->numa_node,
RTE_MEMZONE_2MB |
RTE_MEMZONE_SIZE_HINT_ONLY |
RTE_MEMZONE_IOVA_CONTIG,
if (!mz) {
mz = rte_memzone_reserve_aligned(mz_name,
mem_size,
- SOCKET_ID_ANY,
+ bp->eth_dev->device->numa_node,
RTE_MEMZONE_1GB |
RTE_MEMZONE_SIZE_HINT_ONLY |
RTE_MEMZONE_IOVA_CONTIG,
* tx bd ring - Only non-zero length if tx_ring_info is not NULL
* rx bd ring - Only non-zero length if rx_ring_info is not NULL
*/
-int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
+int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
struct bnxt_tx_queue *txq,
struct bnxt_rx_queue *rxq,
struct bnxt_cp_ring_info *cp_ring_info,
mz = rte_memzone_lookup(mz_name);
if (!mz) {
mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
- SOCKET_ID_ANY,
+ socket_id,
RTE_MEMZONE_2MB |
RTE_MEMZONE_SIZE_HINT_ONLY |
RTE_MEMZONE_IOVA_CONTIG,
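The memzone reservations above all share one shape: look up by name first, then reserve on the requested node with a page-size hint. A minimal standalone sketch, with a hypothetical wrapper name and cache-line alignment assumed (the driver's actual alignment argument falls outside this excerpt):

#include <rte_common.h>
#include <rte_memzone.h>

static const struct rte_memzone *
reserve_on_node(const char *name, size_t len, int numa_node)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	/* Reuse an existing zone (e.g. after restart); otherwise reserve
	 * one on the requested NUMA node. SIZE_HINT_ONLY lets the call
	 * fall back to other page sizes if 2 MB pages are unavailable. */
	if (mz == NULL)
		mz = rte_memzone_reserve_aligned(name, len, numa_node,
						 RTE_MEMZONE_2MB |
						 RTE_MEMZONE_SIZE_HINT_ONLY |
						 RTE_MEMZONE_IOVA_CONTIG,
						 RTE_CACHE_LINE_SIZE);
	return mz;
}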
struct bnxt_cp_ring_info *nqr;
struct bnxt_ring *ring;
int ring_index = BNXT_NUM_ASYNC_CPR(bp);
- unsigned int socket_id;
uint8_t ring_type;
int rc = 0;
if (!BNXT_HAS_NQ(bp) || bp->rxtx_nq_ring)
return 0;
- socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());
-
nqr = rte_zmalloc_socket("nqr",
sizeof(struct bnxt_cp_ring_info),
- RTE_CACHE_LINE_SIZE, socket_id);
+ RTE_CACHE_LINE_SIZE,
+ bp->eth_dev->device->numa_node);
if (nqr == NULL)
return -ENOMEM;
ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
sizeof(struct bnxt_ring),
- RTE_CACHE_LINE_SIZE, socket_id);
+ RTE_CACHE_LINE_SIZE,
+ bp->eth_dev->device->numa_node);
if (ring == NULL) {
rte_free(nqr);
return -ENOMEM;
ring->fw_ring_id = INVALID_HW_RING_ID;
nqr->cp_ring_struct = ring;
- rc = bnxt_alloc_rings(bp, 0, NULL, NULL, nqr, NULL, "l2_nqr");
+ rc = bnxt_alloc_rings(bp, bp->eth_dev->device->numa_node, 0, NULL,
+ NULL, nqr, NULL, "l2_nqr");
if (rc) {
rte_free(ring);
rte_free(nqr);
{
struct bnxt_cp_ring_info *cpr = NULL;
struct bnxt_ring *ring = NULL;
- unsigned int socket_id;
if (BNXT_NUM_ASYNC_CPR(bp) == 0)
return 0;
- socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());
-
cpr = rte_zmalloc_socket("cpr",
sizeof(struct bnxt_cp_ring_info),
- RTE_CACHE_LINE_SIZE, socket_id);
+ RTE_CACHE_LINE_SIZE,
+ bp->eth_dev->device->numa_node);
if (cpr == NULL)
return -ENOMEM;
ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
sizeof(struct bnxt_ring),
- RTE_CACHE_LINE_SIZE, socket_id);
+ RTE_CACHE_LINE_SIZE,
+ bp->eth_dev->device->numa_node);
if (ring == NULL) {
rte_free(cpr);
return -ENOMEM;
bp->async_cp_ring = cpr;
cpr->cp_ring_struct = ring;
- return bnxt_alloc_rings(bp, 0, NULL, NULL,
- bp->async_cp_ring, NULL,
- "def_cp");
+ return bnxt_alloc_rings(bp, bp->eth_dev->device->numa_node, 0, NULL,
+ NULL, bp->async_cp_ring, NULL, "def_cp");
}
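The recurring substitution in these two ring-allocation functions is the socket-id source: the NUMA node of the main lcore gives way to the node the bus reported for the device itself. Side by side, as a sketch (pick_socket is a hypothetical helper; the patch uses the device node directly, a negative value meaning the node is unknown):

#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>

static int
pick_socket(struct rte_eth_dev *eth_dev)
{
	/* Old source: the main lcore's node, which need not match the
	 * node the NIC is attached to. */
	unsigned int lcore_node = rte_lcore_to_socket_id(rte_get_main_lcore());
	/* New source: the device's own node as reported by the bus. */
	int dev_node = eth_dev->device->numa_node;

	return dev_node >= 0 ? dev_node : (int)lcore_node;
}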
struct bnxt_cp_ring_info;
void bnxt_free_ring(struct bnxt_ring *ring);
int bnxt_alloc_ring_grps(struct bnxt *bp);
-int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
+int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
struct bnxt_tx_queue *txq,
struct bnxt_rx_queue *rxq,
struct bnxt_cp_ring_info *cp_ring_info,
eth_dev->data->rx_queues[queue_idx] = rxq;
/* Allocate RX ring hardware descriptors */
- rc = bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring, NULL,
- "rxr");
+ rc = bnxt_alloc_rings(bp, socket_id, queue_idx, NULL, rxq, rxq->cp_ring,
+ NULL, "rxr");
if (rc) {
PMD_DRV_LOG(ERR,
"ring_dma_zone_reserve for rx_ring failed!\n");
txq->port_id = eth_dev->data->port_id;
/* Allocate TX ring hardware descriptors */
- if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring, NULL,
- "txr")) {
+ if (bnxt_alloc_rings(bp, socket_id, queue_idx, txq, NULL, txq->cp_ring,
+ NULL, "txr")) {
PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
rc = -ENOMEM;
goto err;
mz = rte_memzone_lookup(mz_name);
if (!mz) {
mz = rte_memzone_reserve(mz_name,
- entry_length * max_vnics, SOCKET_ID_ANY,
+ entry_length * max_vnics,
+ bp->eth_dev->device->numa_node,
RTE_MEMZONE_2MB |
RTE_MEMZONE_SIZE_HINT_ONLY |
RTE_MEMZONE_IOVA_CONTIG);
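In the rx/tx queue-setup paths, the socket_id now threaded into bnxt_alloc_rings() is the one the application passed to rte_eth_rx_queue_setup() or rte_eth_tx_queue_setup(), so ring memory lands where the caller asked. A usage sketch from the application side (the helper name and queue parameters are illustrative):

#include <rte_ethdev.h>
#include <rte_lcore.h>

static int
setup_rxq_on_port_node(uint16_t port_id, struct rte_mempool *mp)
{
	int node = rte_eth_dev_socket_id(port_id);	/* may be -1 */
	/* Fall back to the caller's socket if the port's node is unknown. */
	unsigned int socket = node >= 0 ? (unsigned int)node : rte_socket_id();

	return rte_eth_rx_queue_setup(port_id, 0, 1024, socket, NULL, mp);
}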