* Must be instantiated with the ethernet addresses of peer traffic generator
* ports.
*/
-struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
+struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
};
struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
+uint16_t mempool_flags;
struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
return 0;
}
+/*
+ * rte_mempool_mem_iter() callback: tear down DMA state for one memory
+ * chunk of an anonymous mempool. The chunk's address range is first
+ * DMA-unmapped from every probed ethdev's underlying device, then
+ * un-registered as external memory.
+ *
+ * Failures are logged at DEBUG level only and do not stop the walk --
+ * a device may legitimately never have mapped this range.
+ */
+static void
+dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
+ struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
+{
+ uint16_t pid = 0;
+ int ret;
+
+ RTE_ETH_FOREACH_DEV(pid) {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[pid];
+
+ /* iova 0: let the driver resolve the IOVA for this range */
+ ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
+ memhdr->len);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to DMA unmap addr 0x%p "
+ "for device %s\n",
+ memhdr->addr, dev->data->name);
+ }
+ }
+ /* drop the external-memory registration done in dma_map_cb() */
+ ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to un-register addr 0x%p\n", memhdr->addr);
+ }
+}
+
+/*
+ * rte_mempool_mem_iter() callback: set up DMA state for one memory
+ * chunk of an anonymous mempool. The chunk is registered as external
+ * memory (NULL IOVA table, system page size granularity) and then
+ * DMA-mapped into every probed ethdev's underlying device.
+ *
+ * If registration fails the chunk is skipped entirely; per-device
+ * mapping failures are logged at DEBUG level and the walk continues,
+ * since not every device needs (or supports) the mapping.
+ */
+static void
+dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
+ struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
+{
+ uint16_t pid = 0;
+ size_t page_size = sysconf(_SC_PAGESIZE);
+ int ret;
+
+ ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
+ page_size);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to register addr 0x%p\n", memhdr->addr);
+ return;
+ }
+ RTE_ETH_FOREACH_DEV(pid) {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[pid];
+
+ /* iova 0: let the driver resolve the IOVA for this range */
+ ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
+ memhdr->len);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to DMA map addr 0x%p "
+ "for device %s\n",
+ memhdr->addr, dev->data->name);
+ }
+ }
+}
/*
* Configuration initialisation done once at init time.
rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
mb_size, (unsigned int) mb_mempool_cache,
sizeof(struct rte_pktmbuf_pool_private),
- socket_id, 0);
+ socket_id, mempool_flags);
if (rte_mp == NULL)
goto err;
}
rte_pktmbuf_pool_init(rte_mp, NULL);
rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
+ rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
break;
}
case MP_ALLOC_XMEM:
portid_t pi;
queueid_t qi;
struct rte_port *port;
- struct ether_addr mac_addr;
+ struct rte_ether_addr mac_addr;
if (port_id_is_invalid(pid, ENABLED_WARN))
return 0;
TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
return;
}
-
- RTE_ETH_FOREACH_DEV_SIBLING(sibling, port_id) {
+ RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
/* reset mapping between old ports and removed device */
rte_eth_devices[sibling].device = NULL;
if (ports[sibling].port_status != RTE_PORT_CLOSED) {
if (test_done == 0)
stop_packet_forwarding();
+ for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
+ if (mempools[i]) {
+ if (mp_alloc_type == MP_ALLOC_ANON)
+ rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
+ NULL);
+ }
+ }
if (ports != NULL) {
no_link_check = 1;
RTE_ETH_FOREACH_DEV(pt_id) {
rxtx_port_config(struct rte_port *port)
{
uint16_t qid;
+ uint64_t offloads;
for (qid = 0; qid < nb_rxq; qid++) {
+ offloads = port->rx_conf[qid].offloads;
port->rx_conf[qid] = port->dev_info.default_rxconf;
+ port->rx_conf[qid].offloads |= offloads;
/* Check if any Rx parameters have been passed */
if (rx_pthresh != RTE_PMD_PARAM_UNSET)
}
for (qid = 0; qid < nb_txq; qid++) {
+ offloads = port->tx_conf[qid].offloads;
port->tx_conf[qid] = port->dev_info.default_txconf;
+ port->tx_conf[qid].offloads |= offloads;
/* Check if any Tx parameters have been passed */
if (tx_pthresh != RTE_PMD_PARAM_UNSET)
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
/* re-configure the device . */
- rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
-
+ retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
+ if (retval < 0)
+ return retval;
rte_eth_dev_info_get(pid, &rte_port->dev_info);
/* If dev_info.vmdq_pool_base is greater than 0,