* Must be instantiated with the Ethernet addresses of peer traffic generator
* ports.
*/
-struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
+struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
* Ethernet device configuration.
*/
struct rte_eth_rxmode rx_mode = {
- .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
+ /**< Default maximum frame length. */
};
struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};
struct vxlan_encap_conf vxlan_encap_conf = {
	.vni = "\x00\x00\x00",
.udp_src = 0,
.udp_dst = RTE_BE16(4789),
- .ipv4_src = IPv4(127, 0, 0, 1),
- .ipv4_dst = IPv4(255, 255, 255, 255),
+ .ipv4_src = RTE_IPV4(127, 0, 0, 1),
+ .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x01",
.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
struct nvgre_encap_conf nvgre_encap_conf = {
	.select_ipv4 = 1,
.select_vlan = 0,
.tni = "\x00\x00\x00",
- .ipv4_src = IPv4(127, 0, 0, 1),
- .ipv4_dst = IPv4(255, 255, 255, 255),
+ .ipv4_src = RTE_IPV4(127, 0, 0, 1),
+ .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x01",
.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
static int all_ports_started(void);
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
-uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
+uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
/*
* Helper function to check if socket is already discovered.
* If yes, return positive value. If not, return zero.
*/
portid_t i;
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
+ peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
peer_eth_addrs[i].addr_bytes[5] = i;
}
}
return 0;
}
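+/*
+ * rte_mempool_mem_iter() callback: DMA-unmap an anonymous memory chunk
+ * from every probed ethdev, then unregister it from DPDK external memory.
+ */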
+static void
+dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
+ struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
+{
+ uint16_t pid = 0;
+ int ret;
+
+ RTE_ETH_FOREACH_DEV(pid) {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[pid];
+
+ ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
+ memhdr->len);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to DMA unmap addr 0x%p "
+ "for device %s\n",
+ memhdr->addr, dev->data->name);
+ }
+ }
+ ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to un-register addr 0x%p\n", memhdr->addr);
+ }
+}
+
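+/*
+ * rte_mempool_mem_iter() callback: register an anonymous memory chunk
+ * with DPDK and DMA-map it for every probed ethdev, so that devices can
+ * DMA to/from mbufs backed by anonymous memory.
+ */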
+static void
+dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
+ struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
+{
+ uint16_t pid = 0;
+ size_t page_size = sysconf(_SC_PAGESIZE);
+ int ret;
+
+ ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
+ page_size);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to register addr 0x%p\n", memhdr->addr);
+ return;
+ }
+ RTE_ETH_FOREACH_DEV(pid) {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[pid];
+
+ ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
+ memhdr->len);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to DMA map addr 0x%p "
+ "for device %s\n",
+ memhdr->addr, dev->data->name);
+ }
+ }
+}
/*
* Configuration initialisation done once at init time.
*/
}
rte_pktmbuf_pool_init(rte_mp, NULL);
rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
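+ /* DMA-map the pool's anonymous memory for all ethdevs (see dma_map_cb). */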
+ rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
break;
}
case MP_ALLOC_XMEM:
uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
struct rte_gro_param gro_param;
uint32_t gso_types;
+ uint16_t data_size;
+ bool warning = false;
int k;
memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
port->need_reconfig = 1;
port->need_reconfig_queues = 1;
port->tx_metadata = 0;
+
+ /* If the driver limits the number of segments per MTU-sized packet
+ * (nb_mtu_seg_max), enlarge the mbuf data size so that a packet of
+ * max_rx_pkt_len bytes still fits in that many segments.
+ */
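+ /* Example with hypothetical values: max_rx_pkt_len = 1518 and
+ * nb_mtu_seg_max = 5 give data_size = 1518 / 5 = 303, so the pool's
+ * mbuf data size must be at least 303 + RTE_PKTMBUF_HEADROOM bytes;
+ * it is raised (with a warning) if currently smaller.
+ */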
+ if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
+ port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
+ data_size = rx_mode.max_rx_pkt_len /
+ port->dev_info.rx_desc_lim.nb_mtu_seg_max;
+
+ if ((data_size + RTE_PKTMBUF_HEADROOM) >
+ mbuf_data_size) {
+ mbuf_data_size = data_size +
+ RTE_PKTMBUF_HEADROOM;
+ warning = true;
+ }
+ }
}
+ if (warning)
+ TESTPMD_LOG(WARNING,
+ "mbuf data size increased to %hu to fit nb_mtu_seg_max\n",
+ mbuf_data_size);
+
/*
* Create pools of mbuf.
* If NUMA support is disabled, create a single pool of mbuf in
fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
- fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
- ETHER_CRC_LEN;
+ fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
+ RTE_ETHER_CRC_LEN;
fwd_lcores[lc_id]->gso_ctx.flag = 0;
}
portid_t pi;
queueid_t qi;
struct rte_port *port;
- struct ether_addr mac_addr;
+ struct rte_ether_addr mac_addr;
if (port_id_is_invalid(pid, ENABLED_WARN))
return 0;
TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
return;
}
-
- RTE_ETH_FOREACH_DEV_SIBLING(sibling, port_id) {
+ RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
/* reset mapping between old ports and removed device */
rte_eth_devices[sibling].device = NULL;
if (ports[sibling].port_status != RTE_PORT_CLOSED) {
void
pmd_test_exit(void)
{
- struct rte_device *device;
portid_t pt_id;
int ret;
int i;
if (test_done == 0)
stop_packet_forwarding();
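+ /* Undo the DMA mappings of anonymous mempools before closing ports. */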
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
+ if (mempools[i] && mp_alloc_type == MP_ALLOC_ANON)
+ rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
+ NULL);
+ }
if (ports != NULL) {
no_link_check = 1;
RTE_ETH_FOREACH_DEV(pt_id) {
printf("\nShutting down port %d...\n", pt_id);
fflush(stdout);
close_port(pt_id);
-
- /*
- * This is a workaround to fix a virtio-user issue that
- * requires to call clean-up routine to remove existing
- * socket.
- * This workaround valid only for testpmd, needs a fix
- * valid for all applications.
- * TODO: Implement proper resource cleanup
- */
- device = rte_eth_devices[pt_id].device;
- if (device && !strcmp(device->driver->name, "net_virtio_user"))
- detach_port_device(pt_id);
}
}
rxtx_port_config(struct rte_port *port)
{
uint16_t qid;
+ uint64_t offloads;
for (qid = 0; qid < nb_rxq; qid++) {
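+ /* Preserve any per-queue Rx offloads already requested: copying the
+ * device defaults below would otherwise overwrite them.
+ */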
+ offloads = port->rx_conf[qid].offloads;
port->rx_conf[qid] = port->dev_info.default_rxconf;
+ port->rx_conf[qid].offloads |= offloads;
/* Check if any Rx parameters have been passed */
if (rx_pthresh != RTE_PMD_PARAM_UNSET)
}
for (qid = 0; qid < nb_txq; qid++) {
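+ /* Likewise, preserve per-queue Tx offloads across the defaults copy. */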
+ offloads = port->tx_conf[qid].offloads;
port->tx_conf[qid] = port->dev_info.default_txconf;
+ port->tx_conf[qid].offloads |= offloads;
/* Check if any Tx parameters have been passed */
if (tx_pthresh != RTE_PMD_PARAM_UNSET)
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
/* Re-configure the device. */
- rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
-
+ retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
+ if (retval < 0)
+ return retval;
rte_eth_dev_info_get(pid, &rte_port->dev_info);
/* If dev_info.vmdq_pool_base is greater than 0,