/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
if (ports == NULL)
rte_exit(-EINVAL, "failed to find a next port id\n");
- while ((ports[p].enabled == 0) && (p < size))
+ while ((p < size) && (ports[p].enabled == 0))
p++;
return p;
}
/*
* Configuration initialisation done once at init time.
*/
-struct mbuf_ctor_arg {
- uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
- uint16_t seg_buf_size; /**< size of data segment in mbuf. */
-};
-
-struct mbuf_pool_ctor_arg {
- uint16_t seg_buf_size; /**< size of data segment in mbuf. */
-};
-
-static void
-testpmd_mbuf_ctor(struct rte_mempool *mp,
- void *opaque_arg,
- void *raw_mbuf,
- __attribute__((unused)) unsigned i)
-{
- struct mbuf_ctor_arg *mb_ctor_arg;
- struct rte_mbuf *mb;
-
- mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
- mb = (struct rte_mbuf *) raw_mbuf;
-
- mb->pool = mp;
- mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
- mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
- mb_ctor_arg->seg_buf_offset);
- mb->buf_len = mb_ctor_arg->seg_buf_size;
- mb->ol_flags = 0;
- mb->data_off = RTE_PKTMBUF_HEADROOM;
- mb->nb_segs = 1;
- mb->tx_offload = 0;
- mb->vlan_tci = 0;
- mb->hash.rss = 0;
-}
-
-static void
-testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
- void *opaque_arg)
-{
- struct mbuf_pool_ctor_arg *mbp_ctor_arg;
- struct rte_pktmbuf_pool_private *mbp_priv;
-
- if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
- printf("%s(%s) private_data_size %d < %d\n",
- __func__, mp->name, (int) mp->private_data_size,
- (int) sizeof(struct rte_pktmbuf_pool_private));
- return;
- }
- mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
- mbp_priv = rte_mempool_get_priv(mp);
- mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
-}
-
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
unsigned int socket_id)
{
char pool_name[RTE_MEMPOOL_NAMESIZE];
struct rte_mempool *rte_mp;
- struct mbuf_pool_ctor_arg mbp_ctor_arg;
- struct mbuf_ctor_arg mb_ctor_arg;
uint32_t mb_size;
- mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
- mbuf_seg_size);
- mb_ctor_arg.seg_buf_offset =
- (uint16_t) RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
- mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
- mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
+ mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
#ifdef RTE_LIBRTE_PMD_XENVIRT
rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
- (unsigned) mb_mempool_cache,
- sizeof(struct rte_pktmbuf_pool_private),
- testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
- testpmd_mbuf_ctor, &mb_ctor_arg,
- socket_id, 0);
+ (unsigned) mb_mempool_cache,
+ sizeof(struct rte_pktmbuf_pool_private),
+ rte_pktmbuf_pool_init, NULL,
+ rte_pktmbuf_init, NULL,
+ socket_id, 0);
rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
(unsigned) mb_mempool_cache,
sizeof(struct rte_pktmbuf_pool_private),
- testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
- testpmd_mbuf_ctor, &mb_ctor_arg,
+ rte_pktmbuf_pool_init, NULL,
+ rte_pktmbuf_init, NULL,
socket_id, 0);
else
- rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
- (unsigned) mb_mempool_cache,
- sizeof(struct rte_pktmbuf_pool_private),
- testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
- testpmd_mbuf_ctor, &mb_ctor_arg,
- socket_id, 0);
+ /* wrapper to rte_mempool_create() */
+ rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
+ mb_mempool_cache, 0, mbuf_seg_size, socket_id);
#endif
socket_num);
}
- /* Configuration of Ethernet ports. */
- ports = rte_zmalloc("testpmd: ports",
- sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
- RTE_CACHE_LINE_SIZE);
- if (ports == NULL) {
- rte_exit(EXIT_FAILURE,
- "rte_zmalloc(%d struct rte_port) failed\n",
- RTE_MAX_ETHPORTS);
- }
-
- /* enabled allocated ports */
- for (pid = 0; pid < nb_ports; pid++)
- ports[pid].enabled = 1;
-
FOREACH_PORT(pid, ports) {
port = &ports[pid];
rte_eth_dev_info_get(pid, &port->dev_info);
int
start_port(portid_t pid)
{
- int diag, need_check_link_status = 0;
+ int diag, need_check_link_status = -1;
portid_t pi;
queueid_t qi;
struct rte_port *port;
return -1;
}
+ if (port_id_is_invalid(pid, ENABLED_WARN))
+ return 0;
+
if (init_fwd_streams() < 0) {
printf("Fail from init_fwd_streams()\n");
return -1;
if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
continue;
+ need_check_link_status = 0;
port = &ports[pi];
if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
RTE_PORT_HANDLING) == 0) {
need_check_link_status = 1;
}
- if (need_check_link_status && !no_link_check)
+ if (need_check_link_status == 1 && !no_link_check)
check_all_ports_link_status(RTE_PORT_ALL);
- else
+ else if (need_check_link_status == 0)
printf("Please stop the ports first\n");
printf("Done\n");
dcb_test = 0;
dcb_config = 0;
}
+
+ if (port_id_is_invalid(pid, ENABLED_WARN))
+ return;
+
printf("Stopping ports...\n");
FOREACH_PORT(pi, ports) {
- if (!port_id_is_invalid(pid, DISABLED_WARN) && pid != pi)
+ if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
continue;
port = &ports[pi];
return;
}
+ if (port_id_is_invalid(pid, ENABLED_WARN))
+ return;
+
printf("Closing ports...\n");
FOREACH_PORT(pi, ports) {
- if (!port_id_is_invalid(pid, DISABLED_WARN) && pid != pi)
+ if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
continue;
port = &ports[pi];
return;
}
- rte_eth_promiscuous_disable(port_id);
-
if (rte_eth_dev_detach(port_id, name))
return;
{
portid_t pt_id;
+ if (test_done == 0)
+ stop_packet_forwarding();
+
FOREACH_PORT(pt_id, ports) {
printf("Stopping port %d...", pt_id);
fflush(stdout);
{
uint8_t i;
- /*
- * Builds up the correct configuration for dcb+vt based on the vlan tags array
- * given above, and the number of traffic classes available for use.
- */
+ /*
+ * Builds up the correct configuration for dcb+vt based on the vlan tags array
+ * given above, and the number of traffic classes available for use.
+ */
if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
return 0;
}
+/*
+ * init_port - one-time allocation and initialisation of the global
+ * 'ports' array used by all per-port state in testpmd.
+ *
+ * The array is zero-allocated for RTE_MAX_ETHPORTS entries (the compile
+ * time maximum), then only the first nb_ports entries are flagged
+ * enabled; FOREACH_PORT iteration elsewhere relies on this flag.
+ * Exits the whole application via rte_exit() if allocation fails.
+ */
+static void
+init_port(void)
+{
+ portid_t pid;
+
+ /* Configuration of Ethernet ports. */
+ /* Sized to RTE_MAX_ETHPORTS, not nb_ports — presumably so ports
+  * attached/detached later can reuse slots; TODO confirm with callers. */
+ ports = rte_zmalloc("testpmd: ports",
+ sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
+ RTE_CACHE_LINE_SIZE);
+ if (ports == NULL) {
+ rte_exit(EXIT_FAILURE,
+ "rte_zmalloc(%d struct rte_port) failed\n",
+ RTE_MAX_ETHPORTS);
+ }
+
+ /* enabled allocated ports */
+ /* Only the probed ports are marked usable; the rest stay disabled (zeroed). */
+ for (pid = 0; pid < nb_ports; pid++)
+ ports[pid].enabled = 1;
+}
+
int
main(int argc, char** argv)
{
if (nb_ports == 0)
RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
+ /* allocate port structures, and init them */
+ init_port();
+
set_def_fwd_config();
if (nb_lcores == 0)
rte_panic("Empty set of forwarding logical cores - check the "