#include <string.h>
#include <time.h>
#include <fcntl.h>
+#ifndef RTE_EXEC_ENV_WINDOWS
#include <sys/mman.h>
+#endif
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif
+#ifdef RTE_EXEC_ENV_WINDOWS
+#include <process.h>
+#endif
#include "testpmd.h"
/* Whether the current configuration is in DCB mode; 0 means it is not */
uint8_t dcb_config = 0;
-/* Whether the dcb is in testing status */
-uint8_t dcb_test = 0;
-
/*
* Configurable number of RX/TX queues.
*/
*/
enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
+/*
+ * Used to set forced link speed
+ */
+uint32_t eth_link_speed;
+
/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
set_default_fwd_ports_config();
}
+#ifndef RTE_EXEC_ENV_WINDOWS
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
}
}
}
+#endif
static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
{
char pool_name[RTE_MEMPOOL_NAMESIZE];
struct rte_mempool *rte_mp = NULL;
+#ifndef RTE_EXEC_ENV_WINDOWS
uint32_t mb_size;
mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
+#endif
mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
TESTPMD_LOG(INFO,
mb_mempool_cache, 0, mbuf_seg_size, socket_id);
break;
}
+#ifndef RTE_EXEC_ENV_WINDOWS
case MP_ALLOC_ANON:
{
rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
heap_socket);
break;
}
+#endif
case MP_ALLOC_XBUF:
{
struct rte_pktmbuf_extmem *ext_mem;
}
}
+#ifndef RTE_EXEC_ENV_WINDOWS
err:
+#endif
if (rte_mp == NULL) {
rte_exit(EXIT_FAILURE,
"Creation of mbuf pool for socket %u failed: %s\n",
return 0;
}
+/*
+ * Apply the default Tx/Rx configuration and offload settings to one port.
+ *
+ * Copies the global tx_mode/rx_mode templates into the port's device
+ * configuration, queries device capabilities (fatal on failure), masks
+ * out Tx offloads the device cannot do, propagates the resulting offload
+ * flags to every Rx/Tx queue, records the socket, and flags the port for
+ * (re)configuration.  Also grows the first mbuf segment size if the MTU
+ * split across the device's max segments would not fit.
+ */
+static void
+init_config_port_offloads(portid_t pid, uint32_t socket_id)
+{
+	struct rte_port *port = &ports[pid];
+	uint16_t data_size;
+	int ret;
+	int i;
+
+	port->dev_conf.txmode = tx_mode;
+	port->dev_conf.rxmode = rx_mode;
+
+	ret = eth_dev_info_get_print_err(pid, &port->dev_info);
+	if (ret != 0)
+		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
+
+	/* Non-fatal: warn and continue with the current offload state. */
+	ret = update_jumbo_frame_offload(pid);
+	if (ret != 0)
+		printf("Updating jumbo frame offload failed for port %u\n",
+			pid);
+
+	/* Drop fast-free from the Tx config if the device lacks it. */
+	if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+		port->dev_conf.txmode.offloads &=
+			~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+	/* Apply Rx offloads configuration */
+	for (i = 0; i < port->dev_info.max_rx_queues; i++)
+		port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
+	/* Apply Tx offloads configuration */
+	for (i = 0; i < port->dev_info.max_tx_queues; i++)
+		port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;
+
+	/* Force the link speed when set via the command line (0 = auto). */
+	if (eth_link_speed)
+		port->dev_conf.link_speeds = eth_link_speed;
+
+	/* set flag to initialize port/queue */
+	port->need_reconfig = 1;
+	port->need_reconfig_queues = 1;
+	port->socket_id = socket_id;
+	port->tx_metadata = 0;
+
+	/*
+	 * Check for maximum number of segments per MTU.
+	 * Accordingly update the mbuf data size.
+	 */
+	if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
+	    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
+		data_size = rx_mode.max_rx_pkt_len /
+			port->dev_info.rx_desc_lim.nb_mtu_seg_max;
+
+		if ((data_size + RTE_PKTMBUF_HEADROOM) > mbuf_data_size[0]) {
+			mbuf_data_size[0] = data_size + RTE_PKTMBUF_HEADROOM;
+			TESTPMD_LOG(WARNING,
+				    "Configured mbuf size of the first segment %hu\n",
+				    mbuf_data_size[0]);
+		}
+	}
+}
+
static void
init_config(void)
{
portid_t pid;
- struct rte_port *port;
struct rte_mempool *mbp;
unsigned int nb_mbuf_per_pool;
lcoreid_t lc_id;
- uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
struct rte_gro_param gro_param;
uint32_t gso_types;
- uint16_t data_size;
- bool warning = 0;
- int k;
- int ret;
-
- memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
/* Configuration of logical cores. */
fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
}
RTE_ETH_FOREACH_DEV(pid) {
- port = &ports[pid];
- /* Apply default TxRx configuration for all ports */
- port->dev_conf.txmode = tx_mode;
- port->dev_conf.rxmode = rx_mode;
+ uint32_t socket_id;
- ret = eth_dev_info_get_print_err(pid, &port->dev_info);
- if (ret != 0)
- rte_exit(EXIT_FAILURE,
- "rte_eth_dev_info_get() failed\n");
-
- ret = update_jumbo_frame_offload(pid);
- if (ret != 0)
- printf("Updating jumbo frame offload failed for port %u\n",
- pid);
-
- if (!(port->dev_info.tx_offload_capa &
- DEV_TX_OFFLOAD_MBUF_FAST_FREE))
- port->dev_conf.txmode.offloads &=
- ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
if (numa_support) {
- if (port_numa[pid] != NUMA_NO_CONFIG)
- port_per_socket[port_numa[pid]]++;
- else {
- uint32_t socket_id = rte_eth_dev_socket_id(pid);
+ socket_id = port_numa[pid];
+ if (port_numa[pid] == NUMA_NO_CONFIG) {
+ socket_id = rte_eth_dev_socket_id(pid);
/*
* if socket_id is invalid,
*/
if (check_socket_id(socket_id) < 0)
socket_id = socket_ids[0];
- port_per_socket[socket_id]++;
- }
- }
-
- /* Apply Rx offloads configuration */
- for (k = 0; k < port->dev_info.max_rx_queues; k++)
- port->rx_conf[k].offloads =
- port->dev_conf.rxmode.offloads;
- /* Apply Tx offloads configuration */
- for (k = 0; k < port->dev_info.max_tx_queues; k++)
- port->tx_conf[k].offloads =
- port->dev_conf.txmode.offloads;
-
- /* set flag to initialize port/queue */
- port->need_reconfig = 1;
- port->need_reconfig_queues = 1;
- port->tx_metadata = 0;
-
- /* Check for maximum number of segments per MTU. Accordingly
- * update the mbuf data size.
- */
- if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
- port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
- data_size = rx_mode.max_rx_pkt_len /
- port->dev_info.rx_desc_lim.nb_mtu_seg_max;
-
- if ((data_size + RTE_PKTMBUF_HEADROOM) >
- mbuf_data_size[0]) {
- mbuf_data_size[0] = data_size +
- RTE_PKTMBUF_HEADROOM;
- warning = 1;
}
+ } else {
+ socket_id = (socket_num == UMA_NO_CONFIG) ?
+ 0 : socket_num;
}
+ /* Apply default TxRx configuration for all ports */
+ init_config_port_offloads(pid, socket_id);
}
-
- if (warning)
- TESTPMD_LOG(WARNING,
- "Configured mbuf size of the first segment %hu\n",
- mbuf_data_size[0]);
/*
* Create pools of mbuf.
* If NUMA support is disabled, create a single pool of mbuf in
fwd_lcores[lc_id]->gso_ctx.flag = 0;
}
- /* Configuration of packet forwarding streams. */
- if (init_fwd_streams() < 0)
- rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
-
fwd_config_setup();
/* create a gro context for each lcore */
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
- struct rte_port *port;
- int ret;
-
/* Reconfiguration of Ethernet ports. */
- port = &ports[new_port_id];
-
- ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
- if (ret != 0)
- return;
-
- /* set flag to initialize port/queue */
- port->need_reconfig = 1;
- port->need_reconfig_queues = 1;
- port->socket_id = socket_id;
-
+ init_config_port_offloads(new_port_id, socket_id);
init_port_config();
}
{
port_fwd_begin_t port_fwd_begin;
port_fwd_end_t port_fwd_end;
- struct rte_port *port;
unsigned int i;
- portid_t pt_id;
if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
printf("Packet forwarding already started\n");
return;
}
-
-
- if(dcb_test) {
- for (i = 0; i < nb_fwd_ports; i++) {
- pt_id = fwd_ports_ids[i];
- port = &ports[pt_id];
- if (!port->dcb_flag) {
- printf("In DCB mode, all forwarding ports must "
- "be configured in this mode.\n");
- return;
- }
- }
- if (nb_fwd_lcores == 1) {
- printf("In DCB mode,the nb forwarding cores "
- "should be larger than 1.\n");
- return;
- }
- }
test_done = 0;
fwd_config_setup();
int peer_pi;
queueid_t qi;
struct rte_port *port;
- struct rte_ether_addr mac_addr;
struct rte_eth_hairpin_cap cap;
if (port_id_is_invalid(pid, ENABLED_WARN))
return 0;
- if(dcb_config)
- dcb_test = 1;
RTE_ETH_FOREACH_DEV(pi) {
if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
continue;
cnt_pi++;
/* start port */
- if (rte_eth_dev_start(pi) < 0) {
- printf("Fail to start port %d\n", pi);
+ diag = rte_eth_dev_start(pi);
+ if (diag < 0) {
+ printf("Fail to start port %d: %s\n", pi,
+ rte_strerror(-diag));
/* Fail to setup rx queue, return */
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
printf("Port %d can not be set into started\n", pi);
- if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
+ if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
- mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
- mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
- mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
+ port->eth_addr.addr_bytes[0],
+ port->eth_addr.addr_bytes[1],
+ port->eth_addr.addr_bytes[2],
+ port->eth_addr.addr_bytes[3],
+ port->eth_addr.addr_bytes[4],
+ port->eth_addr.addr_bytes[5]);
/* at least one port started, need checking link status */
need_check_link_status = 1;
portid_t peer_pl[RTE_MAX_ETHPORTS];
int peer_pi;
- if (dcb_test) {
- dcb_test = 0;
- dcb_config = 0;
- }
-
if (port_id_is_invalid(pid, ENABLED_WARN))
return;
memset(&da, 0, sizeof(da));
if (rte_devargs_parsef(&da, "%s", identifier)) {
printf("cannot parse identifier\n");
- if (da.args)
- free(da.args);
return;
}
if (ports[port_id].port_status != RTE_PORT_STOPPED) {
printf("Port %u not stopped\n", port_id);
rte_eth_iterator_cleanup(&iterator);
+ rte_devargs_reset(&da);
return;
}
port_flow_flush(port_id);
if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
da.name, da.bus->name);
+ rte_devargs_reset(&da);
return;
}
printf("Device %s is detached\n", identifier);
printf("Now total ports is %d\n", nb_ports);
printf("Done\n");
+ rte_devargs_reset(&da);
}
void
if (test_done == 0)
stop_packet_forwarding();
+#ifndef RTE_EXEC_ENV_WINDOWS
for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
if (mempools[i]) {
if (mp_alloc_type == MP_ALLOC_ANON)
NULL);
}
}
+#endif
if (ports != NULL) {
no_link_check = 1;
RTE_ETH_FOREACH_DEV(pt_id) {
rte_port = &ports[pid];
memset(&port_conf, 0, sizeof(struct rte_eth_conf));
- /* Enter DCB configuration status */
- dcb_config = 1;
port_conf.rxmode = rte_port->dev_conf.rxmode;
port_conf.txmode = rte_port->dev_conf.txmode;
rte_port->dcb_flag = 1;
+ /* Enter DCB configuration status */
+ dcb_config = 1;
+
return 0;
}
/* Set flag to indicate the force termination. */
f_quit = 1;
/* exit with the expected status */
+#ifndef RTE_EXEC_ENV_WINDOWS
signal(signum, SIG_DFL);
kill(getpid(), signum);
+#endif
}
}
if (argc > 1)
launch_args_parse(argc, argv);
+#ifndef RTE_EXEC_ENV_WINDOWS
if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
strerror(errno));
}
+#endif
if (tx_first && interactive)
rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
}
/* Sleep to avoid unnecessary checks */
prev_time = cur_time;
- sleep(1);
+ rte_delay_us_sleep(US_PER_S);
}
}