NULL,
};
+struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
+uint16_t mempool_flags;
+
struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
return 0;
}
+/*
+ * rte_mempool_mem_iter() callback: tear down the DMA state set up by
+ * dma_map_cb() for one mempool memory chunk.
+ *
+ * For every probed ethdev, DMA-unmap the chunk from the device, then
+ * un-register the chunk as external memory with EAL.  All failures are
+ * logged at DEBUG only and otherwise ignored: this runs on the exit
+ * path (see the MP_ALLOC_ANON cleanup loop), so teardown is best-effort.
+ *
+ * mp/opaque/mem_idx are unused; only memhdr (addr/len) matters here.
+ */
+static void
+dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
+ struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
+{
+ uint16_t pid = 0;
+ int ret;
+
+ RTE_ETH_FOREACH_DEV(pid) {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[pid];
+
+ /* iova passed as 0 — presumably VA-contiguous (VA==IOVA)
+ * external memory; confirm against rte_dev_dma_unmap() docs. */
+ ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
+ memhdr->len);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to DMA unmap addr 0x%p "
+ "for device %s\n",
+ memhdr->addr, dev->data->name);
+ }
+ }
+ /* Unregister after unmapping from all devices (reverse of map order). */
+ ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to un-register addr 0x%p\n", memhdr->addr);
+ }
+}
+
+/*
+ * rte_mempool_mem_iter() callback: make one mempool memory chunk usable
+ * for DMA by all probed ethdevs.
+ *
+ * First registers the chunk with EAL as external memory (NULL IOVA table,
+ * 0 entries, host page-size granularity), then DMA-maps it into every
+ * device.  If registration fails the chunk is skipped entirely (early
+ * return); individual per-device map failures are only logged at DEBUG —
+ * NOTE(review): no rollback of already-mapped devices in that case.
+ *
+ * mp/opaque/mem_idx are unused; only memhdr (addr/len) matters here.
+ */
+static void
+dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
+ struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
+{
+ uint16_t pid = 0;
+ size_t page_size = sysconf(_SC_PAGESIZE);
+ int ret;
+
+ ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
+ page_size);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to register addr 0x%p\n", memhdr->addr);
+ return;
+ }
+ RTE_ETH_FOREACH_DEV(pid) {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[pid];
+
+ /* iova passed as 0 — presumably VA-contiguous (VA==IOVA)
+ * external memory; confirm against rte_dev_dma_map() docs. */
+ ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
+ memhdr->len);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to DMA map addr 0x%p "
+ "for device %s\n",
+ memhdr->addr, dev->data->name);
+ }
+ }
+}
/*
* Configuration initialisation done once at init time.
*/
-static void
+static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
unsigned int socket_id)
{
rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
mb_size, (unsigned int) mb_mempool_cache,
sizeof(struct rte_pktmbuf_pool_private),
- socket_id, 0);
+ socket_id, mempool_flags);
if (rte_mp == NULL)
goto err;
}
rte_pktmbuf_pool_init(rte_mp, NULL);
rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
+ rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
break;
}
case MP_ALLOC_XMEM:
} else if (verbose_level > 0) {
rte_mempool_dump(stdout, rte_mp);
}
+ return rte_mp;
}
/*
uint8_t i;
for (i = 0; i < num_sockets; i++)
- mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
- socket_ids[i]);
+ mempools[i] = mbuf_pool_create(mbuf_data_size,
+ nb_mbuf_per_pool,
+ socket_ids[i]);
} else {
if (socket_num == UMA_NO_CONFIG)
- mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
+ mempools[0] = mbuf_pool_create(mbuf_data_size,
+ nb_mbuf_per_pool, 0);
else
- mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
- socket_num);
+ mempools[socket_num] = mbuf_pool_create
+ (mbuf_data_size,
+ nb_mbuf_per_pool,
+ socket_num);
}
init_port_config();
TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
return;
}
-
- RTE_ETH_FOREACH_DEV_SIBLING(sibling, port_id) {
+ RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
/* reset mapping between old ports and removed device */
rte_eth_devices[sibling].device = NULL;
if (ports[sibling].port_status != RTE_PORT_CLOSED) {
struct rte_device *device;
portid_t pt_id;
int ret;
+ int i;
if (test_done == 0)
stop_packet_forwarding();
+ for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
+ if (mempools[i]) {
+ if (mp_alloc_type == MP_ALLOC_ANON)
+ rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
+ NULL);
+ }
+ }
if (ports != NULL) {
no_link_check = 1;
RTE_ETH_FOREACH_DEV(pt_id) {
return;
}
}
+ for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
+ if (mempools[i])
+ rte_mempool_free(mempools[i]);
+ }
printf("\nBye...\n");
}
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
/* re-configure the device . */
- rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
-
+ retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
+ if (retval < 0)
+ return retval;
rte_eth_dev_info_get(pid, &rte_port->dev_info);
/* If dev_info.vmdq_pool_base is greater than 0,