app/testpmd: remove unnecessary void casts
app/test-pmd/testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_IXGBE_PMD
77 #include <rte_pmd_ixgbe.h>
78 #endif
79 #ifdef RTE_LIBRTE_PMD_XENVIRT
80 #include <rte_eth_xenvirt.h>
81 #endif
82 #ifdef RTE_LIBRTE_PDUMP
83 #include <rte_pdump.h>
84 #endif
85 #include <rte_flow.h>
86 #include <rte_metrics.h>
87 #ifdef RTE_LIBRTE_BITRATE
88 #include <rte_bitrate.h>
89 #endif
90 #ifdef RTE_LIBRTE_LATENCY_STATS
91 #include <rte_latencystats.h>
92 #endif
93 #include <rte_gro.h>
94
95 #include "testpmd.h"
96
97 uint16_t verbose_level = 0; /**< Silent by default. */
98
99 /* Use the master core for the command line? */
100 uint8_t interactive = 0;
101 uint8_t auto_start = 0;
102 uint8_t tx_first;
103 char cmdline_filename[PATH_MAX] = {0};
104
105 /*
106  * NUMA support configuration.
107  * When set, NUMA support dispatches the allocation of the RX and TX memory
108  * rings, and of the DMA memory buffers (mbufs) for the probed ports, among
109  * CPU sockets 0 and 1.
110  * Otherwise, all memory is allocated from CPU socket 0.
111  */
112 uint8_t numa_support = 1; /**< numa enabled by default */
113
114 /*
115  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
116  * not configured.
117  */
118 uint8_t socket_num = UMA_NO_CONFIG;
119
120 /*
121  * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
122  */
123 uint8_t mp_anon = 0;
124
125 /*
126  * Record the Ethernet addresses of the peer target ports to which packets
127  * are forwarded.
128  * Must be instantiated with the Ethernet addresses of the peer traffic
129  * generator ports.
130  */
131 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
132 portid_t nb_peer_eth_addrs = 0;
133
134 /*
135  * Probed Target Environment.
136  */
137 struct rte_port *ports;        /**< For all probed ethernet ports. */
138 portid_t nb_ports;             /**< Number of probed ethernet ports. */
139 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
140 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
141
142 /*
143  * Test Forwarding Configuration.
144  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
145  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
146  */
147 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
148 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
149 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
150 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
151
152 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
153 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
154
155 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
156 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
157
158 /*
159  * Forwarding engines.
160  */
161 struct fwd_engine * fwd_engines[] = {
162         &io_fwd_engine,
163         &mac_fwd_engine,
164         &mac_swap_engine,
165         &flow_gen_engine,
166         &rx_only_engine,
167         &tx_only_engine,
168         &csum_fwd_engine,
169         &icmp_echo_engine,
170 #ifdef RTE_LIBRTE_IEEE1588
171         &ieee1588_fwd_engine,
172 #endif
173         NULL,
174 };
175
176 struct fwd_config cur_fwd_config;
177 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
178 uint32_t retry_enabled;
179 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
180 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
181
182 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
183 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools, if
184                                       * specified on the command line. */
185 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
186 /*
187  * Configuration of packet segments used by the "txonly" processing engine.
188  */
189 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
190 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
191         TXONLY_DEF_PACKET_LEN,
192 };
193 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
194
195 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
196 /**< Split policy for packets to TX. */
197
198 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
199 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
200
201 /* Whether the current configuration is in DCB mode; 0 means it is not. */
202 uint8_t dcb_config = 0;
203
204 /* Whether DCB is in testing status */
205 uint8_t dcb_test = 0;
206
207 /*
208  * Configurable number of RX/TX queues.
209  */
210 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
211 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
212
213 /*
214  * Configurable number of RX/TX ring descriptors.
215  */
216 #define RTE_TEST_RX_DESC_DEFAULT 128
217 #define RTE_TEST_TX_DESC_DEFAULT 512
218 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
219 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
220
221 #define RTE_PMD_PARAM_UNSET -1
222 /*
223  * Configurable values of RX and TX ring threshold registers.
224  */
225
226 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
228 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
229
230 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
232 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
233
234 /*
235  * Configurable value of RX free threshold.
236  */
237 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
238
239 /*
240  * Configurable value of RX drop enable.
241  */
242 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
243
244 /*
245  * Configurable value of TX free threshold.
246  */
247 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
248
249 /*
250  * Configurable value of TX RS bit threshold.
251  */
252 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
253
254 /*
255  * Configurable value of TX queue flags.
256  */
257 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
258
259 /*
260  * Receive Side Scaling (RSS) configuration.
261  */
262 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
263
264 /*
265  * Port topology configuration
266  */
267 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
268
269 /*
270  * Avoid flushing all the RX streams before starting forwarding.
271  */
272 uint8_t no_flush_rx = 0; /* flush by default */
273
274 /*
275  * Flow API isolated mode.
276  */
277 uint8_t flow_isolate_all;
278
279 /*
280  * Avoid checking the link status when starting/stopping a port.
281  */
282 uint8_t no_link_check = 0; /* check by default */
283
284 /*
285  * Enable link status change notification
286  */
287 uint8_t lsc_interrupt = 1; /* enabled by default */
288
289 /*
290  * Enable device removal notification.
291  */
292 uint8_t rmv_interrupt = 1; /* enabled by default */
293
294 /*
295  * Display or mask Ethernet events.
296  * Defaults to all events except VF_MBOX.
297  */
298 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
299                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
300                             (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
301                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
302                             (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
303                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
304
305 /*
306  * NIC bypass mode configuration options.
307  */
308
309 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
310 /* The NIC bypass watchdog timeout. */
311 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
312 #endif
313
314
315 #ifdef RTE_LIBRTE_LATENCY_STATS
316
317 /*
318  * Set when latency stats are enabled on the command line.
319  */
320 uint8_t latencystats_enabled;
321
322 /*
323  * Lcore ID to service latency statistics.
324  */
325 lcoreid_t latencystats_lcore_id = -1;
326
327 #endif
328
329 /*
330  * Ethernet device configuration.
331  */
332 struct rte_eth_rxmode rx_mode = {
333         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
334         .split_hdr_size = 0,
335         .header_split   = 0, /**< Header Split disabled. */
336         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
337         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
338         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
339         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
340         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
341         .hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
342 };
343
344 struct rte_fdir_conf fdir_conf = {
345         .mode = RTE_FDIR_MODE_NONE,
346         .pballoc = RTE_FDIR_PBALLOC_64K,
347         .status = RTE_FDIR_REPORT_STATUS,
348         .mask = {
349                 .vlan_tci_mask = 0x0,
350                 .ipv4_mask     = {
351                         .src_ip = 0xFFFFFFFF,
352                         .dst_ip = 0xFFFFFFFF,
353                 },
354                 .ipv6_mask     = {
355                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
356                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
357                 },
358                 .src_port_mask = 0xFFFF,
359                 .dst_port_mask = 0xFFFF,
360                 .mac_addr_byte_mask = 0xFF,
361                 .tunnel_type_mask = 1,
362                 .tunnel_id_mask = 0xFFFFFFFF,
363         },
364         .drop_queue = 127,
365 };
366
367 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
368
369 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
370 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
371
372 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
373 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
374
375 uint16_t nb_tx_queue_stats_mappings = 0;
376 uint16_t nb_rx_queue_stats_mappings = 0;
377
378 unsigned int num_sockets = 0;
379 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
380
381 #ifdef RTE_LIBRTE_BITRATE
382 /* Bitrate statistics */
383 struct rte_stats_bitrates *bitrate_data;
384 lcoreid_t bitrate_lcore_id;
385 uint8_t bitrate_enabled;
386 #endif
387
388 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
389
390 /* Forward function declarations */
391 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
392 static void check_all_ports_link_status(uint32_t port_mask);
393 static int eth_event_callback(uint8_t port_id,
394                               enum rte_eth_event_type type,
395                               void *param, void *ret_param);
396
397 /*
398  * Check if all the ports are started.
399  * If yes, return positive value. If not, return zero.
400  */
401 static int all_ports_started(void);
402
403 /*
404  * Helper function to check whether a socket ID has not been discovered yet.
405  * If the socket is new, return a positive value. If not, return zero.
406  */
407 int
408 new_socket_id(unsigned int socket_id)
409 {
410         unsigned int i;
411
412         for (i = 0; i < num_sockets; i++) {
413                 if (socket_ids[i] == socket_id)
414                         return 0;
415         }
416         return 1;
417 }
418
419 /*
420  * Setup default configuration.
421  */
422 static void
423 set_default_fwd_lcores_config(void)
424 {
425         unsigned int i;
426         unsigned int nb_lc;
427         unsigned int sock_num;
428
429         nb_lc = 0;
430         for (i = 0; i < RTE_MAX_LCORE; i++) {
431                 sock_num = rte_lcore_to_socket_id(i);
432                 if (new_socket_id(sock_num)) {
433                         if (num_sockets >= RTE_MAX_NUMA_NODES) {
434                                 rte_exit(EXIT_FAILURE,
435                                          "Total sockets greater than %u\n",
436                                          RTE_MAX_NUMA_NODES);
437                         }
438                         socket_ids[num_sockets++] = sock_num;
439                 }
440                 if (!rte_lcore_is_enabled(i))
441                         continue;
442                 if (i == rte_get_master_lcore())
443                         continue;
444                 fwd_lcores_cpuids[nb_lc++] = i;
445         }
446         nb_lcores = (lcoreid_t) nb_lc;
447         nb_cfg_lcores = nb_lcores;
448         nb_fwd_lcores = 1;
449 }
450
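/*
 * Set default peer Ethernet addresses: a locally administered MAC address
 * whose last byte is the port index.
 */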
451 static void
452 set_def_peer_eth_addrs(void)
453 {
454         portid_t i;
455
456         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
457                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
458                 peer_eth_addrs[i].addr_bytes[5] = i;
459         }
460 }
461
462 static void
463 set_default_fwd_ports_config(void)
464 {
465         portid_t pt_id;
466
467         for (pt_id = 0; pt_id < nb_ports; pt_id++)
468                 fwd_ports_ids[pt_id] = pt_id;
469
470         nb_cfg_ports = nb_ports;
471         nb_fwd_ports = nb_ports;
472 }
473
474 void
475 set_def_fwd_config(void)
476 {
477         set_default_fwd_lcores_config();
478         set_def_peer_eth_addrs();
479         set_default_fwd_ports_config();
480 }
481
482 /*
483  * Create an mbuf pool for the given socket. Done once at init time.
484  */
485 static void
486 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
487                  unsigned int socket_id)
488 {
489         char pool_name[RTE_MEMPOOL_NAMESIZE];
490         struct rte_mempool *rte_mp = NULL;
491         uint32_t mb_size;
492
493         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
494         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
495
496         RTE_LOG(INFO, USER1,
497                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
498                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
499
500 #ifdef RTE_LIBRTE_PMD_XENVIRT
501         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
502                 (unsigned) mb_mempool_cache,
503                 sizeof(struct rte_pktmbuf_pool_private),
504                 rte_pktmbuf_pool_init, NULL,
505                 rte_pktmbuf_init, NULL,
506                 socket_id, 0);
507 #endif
508
509         /* if the former XEN allocation failed, fall back to normal allocation */
510         if (rte_mp == NULL) {
511                 if (mp_anon != 0) {
512                         rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
513                                 mb_size, (unsigned) mb_mempool_cache,
514                                 sizeof(struct rte_pktmbuf_pool_private),
515                                 socket_id, 0);
516                         if (rte_mp == NULL)
517                                 goto err;
518
519                         if (rte_mempool_populate_anon(rte_mp) == 0) {
520                                 rte_mempool_free(rte_mp);
521                                 rte_mp = NULL;
522                                 goto err;
523                         }
524                         rte_pktmbuf_pool_init(rte_mp, NULL);
525                         rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
526                 } else {
527                         /* wrapper to rte_mempool_create() */
528                         rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
529                                 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
530                 }
531         }
532
533 err:
534         if (rte_mp == NULL) {
535                 rte_exit(EXIT_FAILURE,
536                         "Creation of mbuf pool for socket %u failed: %s\n",
537                         socket_id, rte_strerror(rte_errno));
538         } else if (verbose_level > 0) {
539                 rte_mempool_dump(stdout, rte_mp);
540         }
541 }
542
543 /*
544  * Check whether the given socket ID is valid in NUMA mode;
545  * if valid, return 0, else return -1.
546  */
547 static int
548 check_socket_id(const unsigned int socket_id)
549 {
550         static int warning_once = 0;
551
552         if (new_socket_id(socket_id)) {
553                 if (!warning_once && numa_support)
554                         printf("Warning: NUMA should be configured manually by"
555                                " using --port-numa-config and"
556                                " --ring-numa-config parameters along with"
557                                " --numa.\n");
558                 warning_once = 1;
559                 return -1;
560         }
561         return 0;
562 }
563
564 static void
565 init_config(void)
566 {
567         portid_t pid;
568         struct rte_port *port;
569         struct rte_mempool *mbp;
570         unsigned int nb_mbuf_per_pool;
571         lcoreid_t  lc_id;
572         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
573
574         memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
575
576         if (numa_support) {
577                 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
578                 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
579                 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
580         }
581
582         /* Configuration of logical cores. */
583         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
584                                 sizeof(struct fwd_lcore *) * nb_lcores,
585                                 RTE_CACHE_LINE_SIZE);
586         if (fwd_lcores == NULL) {
587                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
588                                                         "failed\n", nb_lcores);
589         }
590         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
591                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
592                                                sizeof(struct fwd_lcore),
593                                                RTE_CACHE_LINE_SIZE);
594                 if (fwd_lcores[lc_id] == NULL) {
595                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
596                                                                 "failed\n");
597                 }
598                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
599         }
600
601         RTE_ETH_FOREACH_DEV(pid) {
602                 port = &ports[pid];
603                 rte_eth_dev_info_get(pid, &port->dev_info);
604
605                 if (numa_support) {
606                         if (port_numa[pid] != NUMA_NO_CONFIG)
607                                 port_per_socket[port_numa[pid]]++;
608                         else {
609                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
610
611                                 /* if socket_id is invalid, set to 0 */
612                                 if (check_socket_id(socket_id) < 0)
613                                         socket_id = 0;
614                                 port_per_socket[socket_id]++;
615                         }
616                 }
617
618                 /* set flag to initialize port/queue */
619                 port->need_reconfig = 1;
620                 port->need_reconfig_queues = 1;
621         }
622
623         /*
624          * Create mbuf pools.
625          * If NUMA support is disabled, create a single mbuf pool in
626          * socket 0 memory by default.
627          * Otherwise, create an mbuf pool in the memory of each discovered socket.
628          *
629          * Size the pools with the maximum values of nb_rxd and nb_txd, so that
630          * nb_rxd and nb_txd can be reconfigured at run time.
631          */
632         if (param_total_num_mbufs)
633                 nb_mbuf_per_pool = param_total_num_mbufs;
634         else {
635                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
636                         (nb_lcores * mb_mempool_cache) +
637                         RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
638                 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
639         }
640
641         if (numa_support) {
642                 uint8_t i;
643
644                 for (i = 0; i < num_sockets; i++)
645                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
646                                          socket_ids[i]);
647         } else {
648                 if (socket_num == UMA_NO_CONFIG)
649                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
650                 else
651                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
652                                                  socket_num);
653         }
654
655         init_port_config();
656
657         /*
658          * Record which mbuf pool each logical core should use, if needed.
659          */
660         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
661                 mbp = mbuf_pool_find(
662                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
663
664                 if (mbp == NULL)
665                         mbp = mbuf_pool_find(0);
666                 fwd_lcores[lc_id]->mbp = mbp;
667         }
668
669         /* Configuration of packet forwarding streams. */
670         if (init_fwd_streams() < 0)
671                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
672
673         fwd_config_setup();
674 }
675
676
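/*
 * Refresh the device info of a single port and flag it (and its queues)
 * for reconfiguration on the next start, recording its socket ID.
 */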
677 void
678 reconfig(portid_t new_port_id, unsigned socket_id)
679 {
680         struct rte_port *port;
681
682         /* Reconfiguration of Ethernet ports. */
683         port = &ports[new_port_id];
684         rte_eth_dev_info_get(new_port_id, &port->dev_info);
685
686         /* set flag to initialize port/queue */
687         port->need_reconfig = 1;
688         port->need_reconfig_queues = 1;
689         port->socket_id = socket_id;
690
691         init_port_config();
692 }
693
694
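/*
 * (Re)allocate the forwarding streams: check the requested RX/TX queue
 * counts against each port's limits, assign each port a socket ID, and
 * allocate nb_ports * max(nb_rxq, nb_txq) stream structures.
 */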
695 int
696 init_fwd_streams(void)
697 {
698         portid_t pid;
699         struct rte_port *port;
700         streamid_t sm_id, nb_fwd_streams_new;
701         queueid_t q;
702
703         /* set the socket ID according to whether NUMA is enabled */
704         RTE_ETH_FOREACH_DEV(pid) {
705                 port = &ports[pid];
706                 if (nb_rxq > port->dev_info.max_rx_queues) {
707                         printf("Fail: nb_rxq(%d) is greater than "
708                                 "max_rx_queues(%d)\n", nb_rxq,
709                                 port->dev_info.max_rx_queues);
710                         return -1;
711                 }
712                 if (nb_txq > port->dev_info.max_tx_queues) {
713                         printf("Fail: nb_txq(%d) is greater than "
714                                 "max_tx_queues(%d)\n", nb_txq,
715                                 port->dev_info.max_tx_queues);
716                         return -1;
717                 }
718                 if (numa_support) {
719                         if (port_numa[pid] != NUMA_NO_CONFIG)
720                                 port->socket_id = port_numa[pid];
721                         else {
722                                 port->socket_id = rte_eth_dev_socket_id(pid);
723
724                                 /* if socket_id is invalid, set to 0 */
725                                 if (check_socket_id(port->socket_id) < 0)
726                                         port->socket_id = 0;
727                         }
728                 }
729                 else {
730                         if (socket_num == UMA_NO_CONFIG)
731                                 port->socket_id = 0;
732                         else
733                                 port->socket_id = socket_num;
734                 }
735         }
736
737         q = RTE_MAX(nb_rxq, nb_txq);
738         if (q == 0) {
739                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
740                 return -1;
741         }
742         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
743         if (nb_fwd_streams_new == nb_fwd_streams)
744                 return 0;
745         /* clear the old */
746         if (fwd_streams != NULL) {
747                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
748                         if (fwd_streams[sm_id] == NULL)
749                                 continue;
750                         rte_free(fwd_streams[sm_id]);
751                         fwd_streams[sm_id] = NULL;
752                 }
753                 rte_free(fwd_streams);
754                 fwd_streams = NULL;
755         }
756
757         /* init new */
758         nb_fwd_streams = nb_fwd_streams_new;
759         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
760                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
761         if (fwd_streams == NULL)
762                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
763                                                 "failed\n", nb_fwd_streams);
764
765         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
766                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
767                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
768                 if (fwd_streams[sm_id] == NULL)
769                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
770                                                                 " failed\n");
771         }
772
773         return 0;
774 }
775
776 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
777 static void
778 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
779 {
780         unsigned int total_burst;
781         unsigned int nb_burst;
782         unsigned int burst_stats[3];
783         uint16_t pktnb_stats[3];
784         uint16_t nb_pkt;
785         int burst_percent[3];
786
787         /*
788          * First compute the total number of packet bursts and the
789          * two highest numbers of bursts of the same number of packets.
790          */
791         total_burst = 0;
792         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
793         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
794         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
795                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
796                 if (nb_burst == 0)
797                         continue;
798                 total_burst += nb_burst;
799                 if (nb_burst > burst_stats[0]) {
800                         burst_stats[1] = burst_stats[0];
801                         pktnb_stats[1] = pktnb_stats[0];
802                         burst_stats[0] = nb_burst;
803                         pktnb_stats[0] = nb_pkt;
804                 }
805         }
806         if (total_burst == 0)
807                 return;
808         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
809         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
810                burst_percent[0], (int) pktnb_stats[0]);
811         if (burst_stats[0] == total_burst) {
812                 printf("]\n");
813                 return;
814         }
815         if (burst_stats[0] + burst_stats[1] == total_burst) {
816                 printf(" + %d%% of %d pkts]\n",
817                        100 - burst_percent[0], pktnb_stats[1]);
818                 return;
819         }
820         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
821         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
822         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
823                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
824                 return;
825         }
826         printf(" + %d%% of %d pkts + %d%% of others]\n",
827                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
828 }
829 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
830
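/*
 * Display the forwarding statistics of one port: RX/TX packet and drop
 * counters, checksum errors in csum mode, and, when queue stats mapping
 * is enabled, the per-register queue counters.
 */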
831 static void
832 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
833 {
834         struct rte_port *port;
835         uint8_t i;
836
837         static const char *fwd_stats_border = "----------------------";
838
839         port = &ports[port_id];
840         printf("\n  %s Forward statistics for port %-2d %s\n",
841                fwd_stats_border, port_id, fwd_stats_border);
842
843         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
844                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
845                        "%-"PRIu64"\n",
846                        stats->ipackets, stats->imissed,
847                        (uint64_t) (stats->ipackets + stats->imissed));
848
849                 if (cur_fwd_eng == &csum_fwd_engine)
850                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
851                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
852                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
853                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
854                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
855                 }
856
857                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
858                        "%-"PRIu64"\n",
859                        stats->opackets, port->tx_dropped,
860                        (uint64_t) (stats->opackets + port->tx_dropped));
861         }
862         else {
863                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
864                        "%14"PRIu64"\n",
865                        stats->ipackets, stats->imissed,
866                        (uint64_t) (stats->ipackets + stats->imissed));
867
868                 if (cur_fwd_eng == &csum_fwd_engine)
869                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
870                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
871                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
872                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
873                         printf("  RX-nombufs:             %14"PRIu64"\n",
874                                stats->rx_nombuf);
875                 }
876
877                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
878                        "%14"PRIu64"\n",
879                        stats->opackets, port->tx_dropped,
880                        (uint64_t) (stats->opackets + port->tx_dropped));
881         }
882
883 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
884         if (port->rx_stream)
885                 pkt_burst_stats_display("RX",
886                         &port->rx_stream->rx_burst_stats);
887         if (port->tx_stream)
888                 pkt_burst_stats_display("TX",
889                         &port->tx_stream->tx_burst_stats);
890 #endif
891
892         if (port->rx_queue_stats_mapping_enabled) {
893                 printf("\n");
894                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
895                         printf("  Stats reg %2d RX-packets:%14"PRIu64
896                                "     RX-errors:%14"PRIu64
897                                "    RX-bytes:%14"PRIu64"\n",
898                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
899                 }
900                 printf("\n");
901         }
902         if (port->tx_queue_stats_mapping_enabled) {
903                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
904                         printf("  Stats reg %2d TX-packets:%14"PRIu64
905                                "                                 TX-bytes:%14"PRIu64"\n",
906                                i, stats->q_opackets[i], stats->q_obytes[i]);
907                 }
908         }
909
910         printf("  %s--------------------------------%s\n",
911                fwd_stats_border, fwd_stats_border);
912 }
913
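/* Display the RX/TX counters of one forwarding stream, if it saw any traffic. */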
914 static void
915 fwd_stream_stats_display(streamid_t stream_id)
916 {
917         struct fwd_stream *fs;
918         static const char *fwd_top_stats_border = "-------";
919
920         fs = fwd_streams[stream_id];
921         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
922             (fs->fwd_dropped == 0))
923                 return;
924         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
925                "TX Port=%2d/Queue=%2d %s\n",
926                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
927                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
928         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
929                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
930
931         /* if checksum mode */
932         if (cur_fwd_eng == &csum_fwd_engine) {
933                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
934                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
935         }
936
937 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
938         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
939         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
940 #endif
941 }
942
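/*
 * Drain the packets left in the RX queues of the forwarding ports and free
 * them. Two passes are made, with a short delay in between, and each queue
 * is polled for at most one second.
 */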
943 static void
944 flush_fwd_rx_queues(void)
945 {
946         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
947         portid_t  rxp;
948         portid_t port_id;
949         queueid_t rxq;
950         uint16_t  nb_rx;
951         uint16_t  i;
952         uint8_t   j;
953         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
954         uint64_t timer_period;
955
956         /* convert to number of cycles */
957         timer_period = rte_get_timer_hz(); /* 1 second timeout */
958
959         for (j = 0; j < 2; j++) {
960                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
961                         for (rxq = 0; rxq < nb_rxq; rxq++) {
962                                 port_id = fwd_ports_ids[rxp];
963                                 /**
964                                 * testpmd can get stuck in the do-while loop below
965                                 * if rte_eth_rx_burst() always returns nonzero
966                                 * packets, so a timer is added to exit this loop
967                                 * after the 1-second timer expires.
968                                 */
969                                 prev_tsc = rte_rdtsc();
970                                 do {
971                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
972                                                 pkts_burst, MAX_PKT_BURST);
973                                         for (i = 0; i < nb_rx; i++)
974                                                 rte_pktmbuf_free(pkts_burst[i]);
975
976                                         cur_tsc = rte_rdtsc();
977                                         diff_tsc = cur_tsc - prev_tsc;
978                                         timer_tsc += diff_tsc;
979                                 } while ((nb_rx > 0) &&
980                                         (timer_tsc < timer_period));
981                                 timer_tsc = 0;
982                         }
983                 }
984                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
985         }
986 }
987
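/*
 * Forwarding loop of one logical core: invoke the packet forwarding
 * callback on each stream assigned to this lcore until it is told to
 * stop, updating bitrate/latency statistics when those are enabled.
 */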
988 static void
989 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
990 {
991         struct fwd_stream **fsm;
992         streamid_t nb_fs;
993         streamid_t sm_id;
994 #ifdef RTE_LIBRTE_BITRATE
995         uint64_t tics_per_1sec;
996         uint64_t tics_datum;
997         uint64_t tics_current;
998         uint8_t idx_port, cnt_ports;
999
1000         cnt_ports = rte_eth_dev_count();
1001         tics_datum = rte_rdtsc();
1002         tics_per_1sec = rte_get_timer_hz();
1003 #endif
1004         fsm = &fwd_streams[fc->stream_idx];
1005         nb_fs = fc->stream_nb;
1006         do {
1007                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1008                         (*pkt_fwd)(fsm[sm_id]);
1009 #ifdef RTE_LIBRTE_BITRATE
1010                 if (bitrate_enabled != 0 &&
1011                                 bitrate_lcore_id == rte_lcore_id()) {
1012                         tics_current = rte_rdtsc();
1013                         if (tics_current - tics_datum >= tics_per_1sec) {
1014                                 /* Periodic bitrate calculation */
1015                                 for (idx_port = 0;
1016                                                 idx_port < cnt_ports;
1017                                                 idx_port++)
1018                                         rte_stats_bitrate_calc(bitrate_data,
1019                                                 idx_port);
1020                                 tics_datum = tics_current;
1021                         }
1022                 }
1023 #endif
1024 #ifdef RTE_LIBRTE_LATENCY_STATS
1025                 if (latencystats_enabled != 0 &&
1026                                 latencystats_lcore_id == rte_lcore_id())
1027                         rte_latencystats_update();
1028 #endif
1029
1030         } while (! fc->stopped);
1031 }
1032
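/* lcore entry point: run the current forwarding engine on this lcore's streams. */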
1033 static int
1034 start_pkt_forward_on_core(void *fwd_arg)
1035 {
1036         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1037                              cur_fwd_config.fwd_eng->packet_fwd);
1038         return 0;
1039 }
1040
1041 /*
1042  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1043  * Used to start communication flows in network loopback test configurations.
1044  */
1045 static int
1046 run_one_txonly_burst_on_core(void *fwd_arg)
1047 {
1048         struct fwd_lcore *fwd_lc;
1049         struct fwd_lcore tmp_lcore;
1050
1051         fwd_lc = (struct fwd_lcore *) fwd_arg;
1052         tmp_lcore = *fwd_lc;
1053         tmp_lcore.stopped = 1;
1054         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1055         return 0;
1056 }
1057
1058 /*
1059  * Launch packet forwarding:
1060  *     - Setup per-port forwarding context.
1061  *     - launch logical cores with their forwarding configuration.
1062  */
1063 static void
1064 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1065 {
1066         port_fwd_begin_t port_fwd_begin;
1067         unsigned int i;
1068         unsigned int lc_id;
1069         int diag;
1070
1071         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1072         if (port_fwd_begin != NULL) {
1073                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1074                         (*port_fwd_begin)(fwd_ports_ids[i]);
1075         }
1076         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1077                 lc_id = fwd_lcores_cpuids[i];
1078                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1079                         fwd_lcores[i]->stopped = 0;
1080                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1081                                                      fwd_lcores[i], lc_id);
1082                         if (diag != 0)
1083                                 printf("launch lcore %u failed - diag=%d\n",
1084                                        lc_id, diag);
1085                 }
1086         }
1087 }
1088
1089 /*
1090  * Launch packet forwarding configuration.
1091  */
1092 void
1093 start_packet_forwarding(int with_tx_first)
1094 {
1095         port_fwd_begin_t port_fwd_begin;
1096         port_fwd_end_t  port_fwd_end;
1097         struct rte_port *port;
1098         unsigned int i;
1099         portid_t   pt_id;
1100         streamid_t sm_id;
1101
1102         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1103                 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1104
1105         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1106                 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1107
1108         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1109                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1110                 (!nb_rxq || !nb_txq))
1111                 rte_exit(EXIT_FAILURE,
1112                         "Either rxq or txq are 0, cannot use %s fwd mode\n",
1113                         cur_fwd_eng->fwd_mode_name);
1114
1115         if (all_ports_started() == 0) {
1116                 printf("Not all ports were started\n");
1117                 return;
1118         }
1119         if (test_done == 0) {
1120                 printf("Packet forwarding already started\n");
1121                 return;
1122         }
1123
1124         if (init_fwd_streams() < 0) {
1125                 printf("Fail from init_fwd_streams()\n");
1126                 return;
1127         }
1128
1129         if(dcb_test) {
1130                 for (i = 0; i < nb_fwd_ports; i++) {
1131                         pt_id = fwd_ports_ids[i];
1132                         port = &ports[pt_id];
1133                         if (!port->dcb_flag) {
1134                                 printf("In DCB mode, all forwarding ports must "
1135                                        "be configured in this mode.\n");
1136                                 return;
1137                         }
1138                 }
1139                 if (nb_fwd_lcores == 1) {
1140                         printf("In DCB mode, the nb forwarding cores "
1141                                "should be larger than 1.\n");
1142                         return;
1143                 }
1144         }
1145         test_done = 0;
1146
1147         if(!no_flush_rx)
1148                 flush_fwd_rx_queues();
1149
1150         fwd_config_setup();
1151         pkt_fwd_config_display(&cur_fwd_config);
1152         rxtx_config_display();
1153
1154         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1155                 pt_id = fwd_ports_ids[i];
1156                 port = &ports[pt_id];
1157                 rte_eth_stats_get(pt_id, &port->stats);
1158                 port->tx_dropped = 0;
1159
1160                 map_port_queue_stats_mapping_registers(pt_id, port);
1161         }
1162         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1163                 fwd_streams[sm_id]->rx_packets = 0;
1164                 fwd_streams[sm_id]->tx_packets = 0;
1165                 fwd_streams[sm_id]->fwd_dropped = 0;
1166                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1167                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1168
1169 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1170                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1171                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1172                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1173                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1174 #endif
1175 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1176                 fwd_streams[sm_id]->core_cycles = 0;
1177 #endif
1178         }
1179         if (with_tx_first) {
1180                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1181                 if (port_fwd_begin != NULL) {
1182                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1183                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1184                 }
1185                 while (with_tx_first--) {
1186                         launch_packet_forwarding(
1187                                         run_one_txonly_burst_on_core);
1188                         rte_eal_mp_wait_lcore();
1189                 }
1190                 port_fwd_end = tx_only_engine.port_fwd_end;
1191                 if (port_fwd_end != NULL) {
1192                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1193                                 (*port_fwd_end)(fwd_ports_ids[i]);
1194                 }
1195         }
1196         launch_packet_forwarding(start_pkt_forward_on_core);
1197 }
1198
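/*
 * Stop packet forwarding: tell every forwarding lcore to stop, wait for
 * all of them, then collect and display per-stream, per-port and
 * accumulated statistics.
 */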
1199 void
1200 stop_packet_forwarding(void)
1201 {
1202         struct rte_eth_stats stats;
1203         struct rte_port *port;
1204         port_fwd_end_t  port_fwd_end;
1205         int i;
1206         portid_t   pt_id;
1207         streamid_t sm_id;
1208         lcoreid_t  lc_id;
1209         uint64_t total_recv;
1210         uint64_t total_xmit;
1211         uint64_t total_rx_dropped;
1212         uint64_t total_tx_dropped;
1213         uint64_t total_rx_nombuf;
1214         uint64_t tx_dropped;
1215         uint64_t rx_bad_ip_csum;
1216         uint64_t rx_bad_l4_csum;
1217 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1218         uint64_t fwd_cycles;
1219 #endif
1220         static const char *acc_stats_border = "+++++++++++++++";
1221
1222         if (test_done) {
1223                 printf("Packet forwarding not started\n");
1224                 return;
1225         }
1226         printf("Telling cores to stop...");
1227         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1228                 fwd_lcores[lc_id]->stopped = 1;
1229         printf("\nWaiting for lcores to finish...\n");
1230         rte_eal_mp_wait_lcore();
1231         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1232         if (port_fwd_end != NULL) {
1233                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1234                         pt_id = fwd_ports_ids[i];
1235                         (*port_fwd_end)(pt_id);
1236                 }
1237         }
1238 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1239         fwd_cycles = 0;
1240 #endif
1241         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1242                 if (cur_fwd_config.nb_fwd_streams >
1243                     cur_fwd_config.nb_fwd_ports) {
1244                         fwd_stream_stats_display(sm_id);
1245                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1246                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1247                 } else {
1248                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1249                                 fwd_streams[sm_id];
1250                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1251                                 fwd_streams[sm_id];
1252                 }
1253                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1254                 tx_dropped = (uint64_t) (tx_dropped +
1255                                          fwd_streams[sm_id]->fwd_dropped);
1256                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1257
1258                 rx_bad_ip_csum =
1259                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1260                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1261                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1262                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1263                                                         rx_bad_ip_csum;
1264
1265                 rx_bad_l4_csum =
1266                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1267                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1268                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1269                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1270                                                         rx_bad_l4_csum;
1271
1272 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1273                 fwd_cycles = (uint64_t) (fwd_cycles +
1274                                          fwd_streams[sm_id]->core_cycles);
1275 #endif
1276         }
1277         total_recv = 0;
1278         total_xmit = 0;
1279         total_rx_dropped = 0;
1280         total_tx_dropped = 0;
1281         total_rx_nombuf  = 0;
1282         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1283                 pt_id = fwd_ports_ids[i];
1284
1285                 port = &ports[pt_id];
1286                 rte_eth_stats_get(pt_id, &stats);
1287                 stats.ipackets -= port->stats.ipackets;
1288                 port->stats.ipackets = 0;
1289                 stats.opackets -= port->stats.opackets;
1290                 port->stats.opackets = 0;
1291                 stats.ibytes   -= port->stats.ibytes;
1292                 port->stats.ibytes = 0;
1293                 stats.obytes   -= port->stats.obytes;
1294                 port->stats.obytes = 0;
1295                 stats.imissed  -= port->stats.imissed;
1296                 port->stats.imissed = 0;
1297                 stats.oerrors  -= port->stats.oerrors;
1298                 port->stats.oerrors = 0;
1299                 stats.rx_nombuf -= port->stats.rx_nombuf;
1300                 port->stats.rx_nombuf = 0;
1301
1302                 total_recv += stats.ipackets;
1303                 total_xmit += stats.opackets;
1304                 total_rx_dropped += stats.imissed;
1305                 total_tx_dropped += port->tx_dropped;
1306                 total_rx_nombuf  += stats.rx_nombuf;
1307
1308                 fwd_port_stats_display(pt_id, &stats);
1309         }
1310         printf("\n  %s Accumulated forward statistics for all ports"
1311                "%s\n",
1312                acc_stats_border, acc_stats_border);
1313         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1314                "%-"PRIu64"\n"
1315                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1316                "%-"PRIu64"\n",
1317                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1318                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1319         if (total_rx_nombuf > 0)
1320                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1321         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1322                "%s\n",
1323                acc_stats_border, acc_stats_border);
1324 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1325         if (total_recv > 0)
1326                 printf("\n  CPU cycles/packet=%u (total cycles="
1327                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1328                        (unsigned int)(fwd_cycles / total_recv),
1329                        fwd_cycles, total_recv);
1330 #endif
1331         printf("\nDone.\n");
1332         test_done = 1;
1333 }
1334
1335 void
1336 dev_set_link_up(portid_t pid)
1337 {
1338         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1339                 printf("\nSet link up fail.\n");
1340 }
1341
1342 void
1343 dev_set_link_down(portid_t pid)
1344 {
1345         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1346                 printf("\nSet link down fail.\n");
1347 }
1348
1349 static int
1350 all_ports_started(void)
1351 {
1352         portid_t pi;
1353         struct rte_port *port;
1354
1355         RTE_ETH_FOREACH_DEV(pi) {
1356                 port = &ports[pi];
1357                 /* Check if there is a port which is not started */
1358                 if ((port->port_status != RTE_PORT_STARTED) &&
1359                         (port->slave_flag == 0))
1360                         return 0;
1361         }
1362
1363         /* All ports are started */
1364         return 1;
1365 }
1366
1367 int
1368 all_ports_stopped(void)
1369 {
1370         portid_t pi;
1371         struct rte_port *port;
1372
1373         RTE_ETH_FOREACH_DEV(pi) {
1374                 port = &ports[pi];
1375                 if ((port->port_status != RTE_PORT_STOPPED) &&
1376                         (port->slave_flag == 0))
1377                         return 0;
1378         }
1379
1380         return 1;
1381 }
1382
1383 int
1384 port_is_started(portid_t port_id)
1385 {
1386         if (port_id_is_invalid(port_id, ENABLED_WARN))
1387                 return 0;
1388
1389         if (ports[port_id].port_status != RTE_PORT_STARTED)
1390                 return 0;
1391
1392         return 1;
1393 }
1394
1395 static int
1396 port_is_closed(portid_t port_id)
1397 {
1398         if (port_id_is_invalid(port_id, ENABLED_WARN))
1399                 return 0;
1400
1401         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1402                 return 0;
1403
1404         return 1;
1405 }
1406
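/*
 * Start the given port, or all ports when pid is RTE_PORT_ALL,
 * reconfiguring the device and its TX/RX queues first when they are
 * flagged for reconfiguration.
 */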
1407 int
1408 start_port(portid_t pid)
1409 {
1410         int diag, need_check_link_status = -1;
1411         portid_t pi;
1412         queueid_t qi;
1413         struct rte_port *port;
1414         struct ether_addr mac_addr;
1415         enum rte_eth_event_type event_type;
1416
1417         if (port_id_is_invalid(pid, ENABLED_WARN))
1418                 return 0;
1419
1420         if(dcb_config)
1421                 dcb_test = 1;
1422         RTE_ETH_FOREACH_DEV(pi) {
1423                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1424                         continue;
1425
1426                 need_check_link_status = 0;
1427                 port = &ports[pi];
1428                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1429                                                  RTE_PORT_HANDLING) == 0) {
1430                         printf("Port %d is now not stopped\n", pi);
1431                         continue;
1432                 }
1433
1434                 if (port->need_reconfig > 0) {
1435                         port->need_reconfig = 0;
1436
1437                         if (flow_isolate_all) {
1438                                 int ret = port_flow_isolate(pi, 1);
1439                                 if (ret) {
1440                                         printf("Failed to apply isolated"
1441                                                " mode on port %d\n", pi);
1442                                         return -1;
1443                                 }
1444                         }
1445
1446                         printf("Configuring Port %d (socket %u)\n", pi,
1447                                         port->socket_id);
1448                         /* configure port */
1449                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1450                                                 &(port->dev_conf));
1451                         if (diag != 0) {
1452                                 if (rte_atomic16_cmpset(&(port->port_status),
1453                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1454                                         printf("Port %d cannot be set back "
1455                                                         "to stopped\n", pi);
1456                                 printf("Failed to configure port %d\n", pi);
1457                                 /* try to reconfigure port next time */
1458                                 port->need_reconfig = 1;
1459                                 return -1;
1460                         }
1461                 }
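                /*
                 * (Re)create the Tx/Rx queues. When NUMA support is on and a
                 * per-port ring socket is configured, descriptor rings and
                 * mbuf pools are taken from that socket; otherwise the port's
                 * own socket is used.
                 */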
1462                 if (port->need_reconfig_queues > 0) {
1463                         port->need_reconfig_queues = 0;
1464                         /* setup tx queues */
1465                         for (qi = 0; qi < nb_txq; qi++) {
1466                                 if ((numa_support) &&
1467                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1468                                         diag = rte_eth_tx_queue_setup(pi, qi,
1469                                                 nb_txd, txring_numa[pi],
1470                                                 &(port->tx_conf));
1471                                 else
1472                                         diag = rte_eth_tx_queue_setup(pi, qi,
1473                                                 nb_txd, port->socket_id,
1474                                                 &(port->tx_conf));
1475
1476                                 if (diag == 0)
1477                                         continue;
1478
1479                                 /* Failed to set up a tx queue; return */
1480                                 if (rte_atomic16_cmpset(&(port->port_status),
1481                                                         RTE_PORT_HANDLING,
1482                                                         RTE_PORT_STOPPED) == 0)
1483                                         printf("Port %d cannot be set back "
1484                                                         "to stopped\n", pi);
1485                                 printf("Failed to configure port %d tx queues\n", pi);
1486                                 /* try to reconfigure queues next time */
1487                                 port->need_reconfig_queues = 1;
1488                                 return -1;
1489                         }
1490                         /* setup rx queues */
1491                         for (qi = 0; qi < nb_rxq; qi++) {
1492                                 if ((numa_support) &&
1493                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1494                                         struct rte_mempool *mp =
1495                                                 mbuf_pool_find(rxring_numa[pi]);
1496                                         if (mp == NULL) {
1497                                                 printf("Failed to setup RX queue: "
1498                                                         "no mempool allocated"
1499                                                         " on socket %d\n",
1500                                                         rxring_numa[pi]);
1501                                                 return -1;
1502                                         }
1503
1504                                         diag = rte_eth_rx_queue_setup(pi, qi,
1505                                              nb_rxd, rxring_numa[pi],
1506                                              &(port->rx_conf), mp);
1507                                 } else {
1508                                         struct rte_mempool *mp =
1509                                                 mbuf_pool_find(port->socket_id);
1510                                         if (mp == NULL) {
1511                                                 printf("Failed to setup RX queue: "
1512                                                         "no mempool allocated"
1513                                                         " on socket %d\n",
1514                                                         port->socket_id);
1515                                                 return -1;
1516                                         }
1517                                         diag = rte_eth_rx_queue_setup(pi, qi,
1518                                              nb_rxd, port->socket_id,
1519                                              &(port->rx_conf), mp);
1520                                 }
1521                                 if (diag == 0)
1522                                         continue;
1523
1524                                 /* Failed to set up an rx queue; return */
1525                                 if (rte_atomic16_cmpset(&(port->port_status),
1526                                                         RTE_PORT_HANDLING,
1527                                                         RTE_PORT_STOPPED) == 0)
1528                                         printf("Port %d cannot be set back "
1529                                                         "to stopped\n", pi);
1530                                 printf("Failed to configure port %d rx queues\n", pi);
1531                                 /* try to reconfigure queues next time */
1532                                 port->need_reconfig_queues = 1;
1533                                 return -1;
1534                         }
1535                 }
1536
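                /*
                 * Register eth_event_callback() for every ethdev event type so
                 * that events (link state change, device removal, ...) are
                 * reported and, where needed, acted upon (see the
                 * RTE_ETH_EVENT_INTR_RMV handling further below).
                 */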
1537                 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1538                      event_type < RTE_ETH_EVENT_MAX;
1539                      event_type++) {
1540                         diag = rte_eth_dev_callback_register(pi,
1541                                                         event_type,
1542                                                         eth_event_callback,
1543                                                         NULL);
1544                         if (diag) {
1545                                 printf("Failed to setup event callback for event %d\n",
1546                                         event_type);
1547                                 return -1;
1548                         }
1549                 }
1550
1551                 /* start port */
1552                 if (rte_eth_dev_start(pi) < 0) {
1553                         printf("Failed to start port %d\n", pi);
1554
1555                         /* Failed to start the port; roll its status back to stopped */
1556                         if (rte_atomic16_cmpset(&(port->port_status),
1557                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1558                                 printf("Port %d cannot be set back to "
1559                                                         "stopped\n", pi);
1560                         continue;
1561                 }
1562
1563                 if (rte_atomic16_cmpset(&(port->port_status),
1564                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1565                         printf("Port %d cannot be set to started\n", pi);
1566
1567                 rte_eth_macaddr_get(pi, &mac_addr);
1568                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1569                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1570                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1571                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1572
1573                 /* At least one port started; the link status must be checked */
1574                 need_check_link_status = 1;
1575         }
1576
1577         if (need_check_link_status == 1 && !no_link_check)
1578                 check_all_ports_link_status(RTE_PORT_ALL);
1579         else if (need_check_link_status == 0)
1580                 printf("Please stop the ports first\n");
1581
1582         printf("Done\n");
1583         return 0;
1584 }
1585
1586 void
1587 stop_port(portid_t pid)
1588 {
1589         portid_t pi;
1590         struct rte_port *port;
1591         int need_check_link_status = 0;
1592
1593         if (dcb_test) {
1594                 dcb_test = 0;
1595                 dcb_config = 0;
1596         }
1597
1598         if (port_id_is_invalid(pid, ENABLED_WARN))
1599                 return;
1600
1601         printf("Stopping ports...\n");
1602
1603         RTE_ETH_FOREACH_DEV(pi) {
1604                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1605                         continue;
1606
1607                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1608                         printf("Please remove port %d from forwarding configuration.\n", pi);
1609                         continue;
1610                 }
1611
1612                 if (port_is_bonding_slave(pi)) {
1613                         printf("Please remove port %d from bonded device.\n", pi);
1614                         continue;
1615                 }
1616
1617                 port = &ports[pi];
1618                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1619                                                 RTE_PORT_HANDLING) == 0)
1620                         continue;
1621
1622                 rte_eth_dev_stop(pi);
1623
1624                 if (rte_atomic16_cmpset(&(port->port_status),
1625                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1626                         printf("Port %d cannot be set to stopped\n", pi);
1627                 need_check_link_status = 1;
1628         }
1629         if (need_check_link_status && !no_link_check)
1630                 check_all_ports_link_status(RTE_PORT_ALL);
1631
1632         printf("Done\n");
1633 }
1634
1635 void
1636 close_port(portid_t pid)
1637 {
1638         portid_t pi;
1639         struct rte_port *port;
1640
1641         if (port_id_is_invalid(pid, ENABLED_WARN))
1642                 return;
1643
1644         printf("Closing ports...\n");
1645
1646         RTE_ETH_FOREACH_DEV(pi) {
1647                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1648                         continue;
1649
1650                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1651                         printf("Please remove port %d from forwarding configuration.\n", pi);
1652                         continue;
1653                 }
1654
1655                 if (port_is_bonding_slave(pi)) {
1656                         printf("Please remove port %d from bonded device.\n", pi);
1657                         continue;
1658                 }
1659
1660                 port = &ports[pi];
1661                 if (rte_atomic16_cmpset(&(port->port_status),
1662                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1663                         printf("Port %d is already closed\n", pi);
1664                         continue;
1665                 }
1666
1667                 if (rte_atomic16_cmpset(&(port->port_status),
1668                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1669                         printf("Port %d is not stopped\n", pi);
1670                         continue;
1671                 }
1672
1673                 if (port->flow_list)
1674                         port_flow_flush(pi);
1675                 rte_eth_dev_close(pi);
1676
1677                 if (rte_atomic16_cmpset(&(port->port_status),
1678                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1679                         printf("Port %d cannot be set to closed\n", pi);
1680         }
1681
1682         printf("Done\n");
1683 }
1684
1685 void
1686 reset_port(portid_t pid)
1687 {
1688         int diag;
1689         portid_t pi;
1690         struct rte_port *port;
1691
1692         if (port_id_is_invalid(pid, ENABLED_WARN))
1693                 return;
1694
1695         printf("Resetting ports...\n");
1696
1697         RTE_ETH_FOREACH_DEV(pi) {
1698                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1699                         continue;
1700
1701                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1702                         printf("Please remove port %d from forwarding "
1703                                "configuration.\n", pi);
1704                         continue;
1705                 }
1706
1707                 if (port_is_bonding_slave(pi)) {
1708                         printf("Please remove port %d from bonded device.\n",
1709                                pi);
1710                         continue;
1711                 }
1712
1713                 diag = rte_eth_dev_reset(pi);
1714                 if (diag == 0) {
1715                         port = &ports[pi];
1716                         port->need_reconfig = 1;
1717                         port->need_reconfig_queues = 1;
1718                 } else {
1719                         printf("Failed to reset port %d. diag=%d\n", pi, diag);
1720                 }
1721         }
1722
1723         printf("Done\n");
1724 }
1725
1726 void
1727 attach_port(char *identifier)
1728 {
1729         portid_t pi = 0;
1730         unsigned int socket_id;
1731
1732         printf("Attaching a new port...\n");
1733
1734         if (identifier == NULL) {
1735                 printf("Invalid parameters are specified\n");
1736                 return;
1737         }
1738
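        /*
         * The identifier is a device argument string as accepted by
         * rte_eth_dev_attach(), e.g. a PCI address or a virtual device name
         * with optional arguments.
         */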
1739         if (rte_eth_dev_attach(identifier, &pi))
1740                 return;
1741
1742         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1743         /* if socket_id is invalid, set to 0 */
1744         if (check_socket_id(socket_id) < 0)
1745                 socket_id = 0;
1746         reconfig(pi, socket_id);
1747         rte_eth_promiscuous_enable(pi);
1748
1749         nb_ports = rte_eth_dev_count();
1750
1751         ports[pi].port_status = RTE_PORT_STOPPED;
1752
1753         printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1754         printf("Done\n");
1755 }
1756
1757 void
1758 detach_port(uint8_t port_id)
1759 {
1760         char name[RTE_ETH_NAME_MAX_LEN];
1761
1762         printf("Detaching a port...\n");
1763
1764         if (!port_is_closed(port_id)) {
1765                 printf("Please close port first\n");
1766                 return;
1767         }
1768
1769         if (ports[port_id].flow_list)
1770                 port_flow_flush(port_id);
1771
1772         if (rte_eth_dev_detach(port_id, name)) {
1773                 RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1774                 return;
1775         }
1776
1777         nb_ports = rte_eth_dev_count();
1778
1779         printf("Port '%s' is detached. Total number of ports is now %d\n",
1780                         name, nb_ports);
1781         printf("Done\n");
1782         return;
1783 }
1784
1785 void
1786 pmd_test_exit(void)
1787 {
1788         portid_t pt_id;
1789
1790         if (test_done == 0)
1791                 stop_packet_forwarding();
1792
1793         if (ports != NULL) {
1794                 no_link_check = 1;
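                /* Skip the link-status poll (up to 9 s per call) during shutdown. */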
1795                 RTE_ETH_FOREACH_DEV(pt_id) {
1796                         printf("\nShutting down port %d...\n", pt_id);
1797                         fflush(stdout);
1798                         stop_port(pt_id);
1799                         close_port(pt_id);
1800                 }
1801         }
1802         printf("\nBye...\n");
1803 }
1804
1805 typedef void (*cmd_func_t)(void);
1806 struct pmd_test_command {
1807         const char *cmd_name;
1808         cmd_func_t cmd_func;
1809 };
1810
1811 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1812
1813 /* Check the link status of all ports for up to 9 s, and print it at the end */
1814 static void
1815 check_all_ports_link_status(uint32_t port_mask)
1816 {
1817 #define CHECK_INTERVAL 100 /* 100ms */
1818 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1819         uint8_t portid, count, all_ports_up, print_flag = 0;
1820         struct rte_eth_link link;
1821
1822         printf("Checking link statuses...\n");
1823         fflush(stdout);
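        /*
         * Poll the selected ports every CHECK_INTERVAL ms until they all
         * report link up or MAX_CHECK_TIME polls have elapsed; the per-port
         * status is printed on one final pass. With LSC interrupts enabled
         * only a single poll is performed.
         */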
1824         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1825                 all_ports_up = 1;
1826                 RTE_ETH_FOREACH_DEV(portid) {
1827                         if ((port_mask & (1 << portid)) == 0)
1828                                 continue;
1829                         memset(&link, 0, sizeof(link));
1830                         rte_eth_link_get_nowait(portid, &link);
1831                         /* print link status if flag set */
1832                         if (print_flag == 1) {
1833                                 if (link.link_status)
1834                                         printf("Port %d Link Up - speed %u "
1835                                                 "Mbps - %s\n", (uint8_t)portid,
1836                                                 (unsigned)link.link_speed,
1837                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1838                                         ("full-duplex") : ("half-duplex"));
1839                                 else
1840                                         printf("Port %d Link Down\n",
1841                                                 (uint8_t)portid);
1842                                 continue;
1843                         }
1844                         /* clear all_ports_up flag if any link down */
1845                         if (link.link_status == ETH_LINK_DOWN) {
1846                                 all_ports_up = 0;
1847                                 break;
1848                         }
1849                 }
1850                 /* after finally printing all link status, get out */
1851                 if (print_flag == 1)
1852                         break;
1853
1854                 if (all_ports_up == 0) {
1855                         fflush(stdout);
1856                         rte_delay_ms(CHECK_INTERVAL);
1857                 }
1858
1859                 /* set the print_flag if all ports up or timeout */
1860                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1861                         print_flag = 1;
1862                 }
1863
1864                 if (lsc_interrupt)
1865                         break;
1866         }
1867 }
1868
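/*
 * Deferred handler for RTE_ETH_EVENT_INTR_RMV: invoked from an EAL alarm
 * (scheduled in eth_event_callback()) to stop and close the port and detach
 * the underlying device.
 */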
1869 static void
1870 rmv_event_callback(void *arg)
1871 {
1872         struct rte_eth_dev *dev;
1873         uint8_t port_id = (intptr_t)arg;
1874
1875         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1876         dev = &rte_eth_devices[port_id];
1877
1878         stop_port(port_id);
1879         close_port(port_id);
1880         printf("removing device %s\n", dev->device->name);
1881         if (rte_eal_dev_detach(dev->device))
1882                 RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
1883                         dev->device->name);
1884 }
1885
1886 /* This function is used by the interrupt thread */
1887 static int
1888 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
1889                   void *ret_param)
1890 {
1891         static const char * const event_desc[] = {
1892                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1893                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1894                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1895                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1896                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1897                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1898                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1899                 [RTE_ETH_EVENT_MAX] = NULL,
1900         };
1901
1902         RTE_SET_USED(param);
1903         RTE_SET_USED(ret_param);
1904
1905         if (type >= RTE_ETH_EVENT_MAX) {
1906                 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1907                         port_id, __func__, type);
1908                 fflush(stderr);
1909         } else if (event_print_mask & (UINT32_C(1) << type)) {
1910                 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1911                         event_desc[type]);
1912                 fflush(stdout);
1913         }
1914
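        /*
         * Device removal is not handled here directly: an EAL alarm defers it
         * to rmv_event_callback() so that the stop/close/detach sequence does
         * not run in this interrupt-thread context.
         */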
1915         switch (type) {
1916         case RTE_ETH_EVENT_INTR_RMV:
1917                 if (rte_eal_alarm_set(100000,
1918                                 rmv_event_callback, (void *)(intptr_t)port_id))
1919                         fprintf(stderr, "Could not set up deferred device removal\n");
1920                 break;
1921         default:
1922                 break;
1923         }
1924         return 0;
1925 }
1926
1927 static int
1928 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1929 {
1930         uint16_t i;
1931         int diag;
1932         uint8_t mapping_found = 0;
1933
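        /*
         * Program every configured mapping that targets this port and an
         * existing Tx queue; -ENOTSUP from the PMD is handled by the caller.
         */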
1934         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1935                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1936                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1937                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1938                                         tx_queue_stats_mappings[i].queue_id,
1939                                         tx_queue_stats_mappings[i].stats_counter_id);
1940                         if (diag != 0)
1941                                 return diag;
1942                         mapping_found = 1;
1943                 }
1944         }
1945         if (mapping_found)
1946                 port->tx_queue_stats_mapping_enabled = 1;
1947         return 0;
1948 }
1949
1950 static int
1951 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1952 {
1953         uint16_t i;
1954         int diag;
1955         uint8_t mapping_found = 0;
1956
1957         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1958                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1959                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1960                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1961                                         rx_queue_stats_mappings[i].queue_id,
1962                                         rx_queue_stats_mappings[i].stats_counter_id);
1963                         if (diag != 0)
1964                                 return diag;
1965                         mapping_found = 1;
1966                 }
1967         }
1968         if (mapping_found)
1969                 port->rx_queue_stats_mapping_enabled = 1;
1970         return 0;
1971 }
1972
1973 static void
1974 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1975 {
1976         int diag = 0;
1977
1978         diag = set_tx_queue_stats_mapping_registers(pi, port);
1979         if (diag != 0) {
1980                 if (diag == -ENOTSUP) {
1981                         port->tx_queue_stats_mapping_enabled = 0;
1982                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1983                 }
1984                 else
1985                         rte_exit(EXIT_FAILURE,
1986                                         "set_tx_queue_stats_mapping_registers "
1987                                         "failed for port id=%d diag=%d\n",
1988                                         pi, diag);
1989         }
1990
1991         diag = set_rx_queue_stats_mapping_registers(pi, port);
1992         if (diag != 0) {
1993                 if (diag == -ENOTSUP) {
1994                         port->rx_queue_stats_mapping_enabled = 0;
1995                         printf("RX queue stats mapping not supported port id=%d\n", pi);
1996                 }
1997                 else
1998                         rte_exit(EXIT_FAILURE,
1999                                         "set_rx_queue_stats_mapping_registers "
2000                                         "failed for port id=%d diag=%d\n",
2001                                         pi, diag);
2002         }
2003 }
2004
2005 static void
2006 rxtx_port_config(struct rte_port *port)
2007 {
2008         port->rx_conf = port->dev_info.default_rxconf;
2009         port->tx_conf = port->dev_info.default_txconf;
2010
2011         /* Override the PMD defaults with any RX/TX parameters passed on the command line */
2012         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2013                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2014
2015         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2016                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2017
2018         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2019                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2020
2021         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2022                 port->rx_conf.rx_free_thresh = rx_free_thresh;
2023
2024         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2025                 port->rx_conf.rx_drop_en = rx_drop_en;
2026
2027         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2028                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2029
2030         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2031                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2032
2033         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2034                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2035
2036         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2037                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2038
2039         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2040                 port->tx_conf.tx_free_thresh = tx_free_thresh;
2041
2042         if (txq_flags != RTE_PMD_PARAM_UNSET)
2043                 port->tx_conf.txq_flags = txq_flags;
2044 }
2045
2046 void
2047 init_port_config(void)
2048 {
2049         portid_t pid;
2050         struct rte_port *port;
2051
2052         RTE_ETH_FOREACH_DEV(pid) {
2053                 port = &ports[pid];
2054                 port->dev_conf.rxmode = rx_mode;
2055                 port->dev_conf.fdir_conf = fdir_conf;
2056                 if (nb_rxq > 1) {
2057                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2058                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2059                 } else {
2060                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2061                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2062                 }
2063
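                /*
                 * Without DCB, enable RSS multi-queue RX mode only when an RSS
                 * hash was actually requested above (i.e. more than one RX queue).
                 */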
2064                 if (port->dcb_flag == 0) {
2065                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2066                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2067                         else
2068                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2069                 }
2070
2071                 rxtx_port_config(port);
2072
2073                 rte_eth_macaddr_get(pid, &port->eth_addr);
2074
2075                 map_port_queue_stats_mapping_registers(pid, port);
2076 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2077                 rte_pmd_ixgbe_bypass_init(pid);
2078 #endif
2079
2080                 if (lsc_interrupt &&
2081                     (rte_eth_devices[pid].data->dev_flags &
2082                      RTE_ETH_DEV_INTR_LSC))
2083                         port->dev_conf.intr_conf.lsc = 1;
2084                 if (rmv_interrupt &&
2085                     (rte_eth_devices[pid].data->dev_flags &
2086                      RTE_ETH_DEV_INTR_RMV))
2087                         port->dev_conf.intr_conf.rmv = 1;
2088         }
2089 }
2090
2091 void set_port_slave_flag(portid_t slave_pid)
2092 {
2093         struct rte_port *port;
2094
2095         port = &ports[slave_pid];
2096         port->slave_flag = 1;
2097 }
2098
2099 void clear_port_slave_flag(portid_t slave_pid)
2100 {
2101         struct rte_port *port;
2102
2103         port = &ports[slave_pid];
2104         port->slave_flag = 0;
2105 }
2106
2107 uint8_t port_is_bonding_slave(portid_t slave_pid)
2108 {
2109         struct rte_port *port;
2110
2111         port = &ports[slave_pid];
2112         return port->slave_flag;
2113 }
2114
2115 const uint16_t vlan_tags[] = {
2116                 0,  1,  2,  3,  4,  5,  6,  7,
2117                 8,  9, 10, 11,  12, 13, 14, 15,
2118                 16, 17, 18, 19, 20, 21, 22, 23,
2119                 24, 25, 26, 27, 28, 29, 30, 31
2120 };
2121
2122 static int
2123 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2124                  enum dcb_mode_enable dcb_mode,
2125                  enum rte_eth_nb_tcs num_tcs,
2126                  uint8_t pfc_en)
2127 {
2128         uint8_t i;
2129
2130         /*
2131          * Builds up the correct configuration for dcb+vt based on the vlan tags array
2132          * given above, and the number of traffic classes available for use.
2133          */
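        /*
         * For example, num_tcs == ETH_4_TCS selects ETH_32_POOLS, so each of
         * the 32 vlan_tags[] entries below is mapped to its own VMDq pool.
         */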
2134         if (dcb_mode == DCB_VT_ENABLED) {
2135                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2136                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2137                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2138                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2139
2140                 /* VMDQ+DCB RX and TX configurations */
2141                 vmdq_rx_conf->enable_default_pool = 0;
2142                 vmdq_rx_conf->default_pool = 0;
2143                 vmdq_rx_conf->nb_queue_pools =
2144                         (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2145                 vmdq_tx_conf->nb_queue_pools =
2146                         (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2147
2148                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2149                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2150                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2151                         vmdq_rx_conf->pool_map[i].pools =
2152                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
2153                 }
2154                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2155                         vmdq_rx_conf->dcb_tc[i] = i;
2156                         vmdq_tx_conf->dcb_tc[i] = i;
2157                 }
2158
2159                 /* set DCB mode of RX and TX of multiple queues */
2160                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2161                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2162         } else {
2163                 struct rte_eth_dcb_rx_conf *rx_conf =
2164                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
2165                 struct rte_eth_dcb_tx_conf *tx_conf =
2166                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
2167
2168                 rx_conf->nb_tcs = num_tcs;
2169                 tx_conf->nb_tcs = num_tcs;
2170
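                /*
                 * Map the eight user priorities round-robin onto the configured
                 * traffic classes; the RX side combines DCB with RSS.
                 */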
2171                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2172                         rx_conf->dcb_tc[i] = i % num_tcs;
2173                         tx_conf->dcb_tc[i] = i % num_tcs;
2174                 }
2175                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2176                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2177                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2178         }
2179
2180         if (pfc_en)
2181                 eth_conf->dcb_capability_en =
2182                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2183         else
2184                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2185
2186         return 0;
2187 }
2188
2189 int
2190 init_port_dcb_config(portid_t pid,
2191                      enum dcb_mode_enable dcb_mode,
2192                      enum rte_eth_nb_tcs num_tcs,
2193                      uint8_t pfc_en)
2194 {
2195         struct rte_eth_conf port_conf;
2196         struct rte_port *rte_port;
2197         int retval;
2198         uint16_t i;
2199
2200         rte_port = &ports[pid];
2201
2202         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2203         /* Enter DCB configuration status */
2204         dcb_config = 1;
2205
2206         /* Set the configuration of DCB in VT mode and DCB in non-VT mode */
2207         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2208         if (retval < 0)
2209                 return retval;
2210         port_conf.rxmode.hw_vlan_filter = 1;
2211
2212         /**
2213          * Write the configuration into the device.
2214          * Set the numbers of RX & TX queues to 0, so
2215          * the RX & TX queues will not be setup.
2216          */
2217         rte_eth_dev_configure(pid, 0, 0, &port_conf);
2218
2219         rte_eth_dev_info_get(pid, &rte_port->dev_info);
2220
2221         /* If dev_info.vmdq_pool_base is greater than 0,
2222          * the queue IDs of the VMDq pools start after the PF queues.
2223          */
2224         if (dcb_mode == DCB_VT_ENABLED &&
2225             rte_port->dev_info.vmdq_pool_base > 0) {
2226                 printf("VMDQ_DCB multi-queue mode is nonsensical"
2227                         " for port %d.\n", pid);
2228                 return -1;
2229         }
2230
2231         /* Assume the ports in testpmd have the same DCB capability
2232          * and the same number of rxq and txq in DCB mode
2233          */
2234         if (dcb_mode == DCB_VT_ENABLED) {
2235                 if (rte_port->dev_info.max_vfs > 0) {
2236                         nb_rxq = rte_port->dev_info.nb_rx_queues;
2237                         nb_txq = rte_port->dev_info.nb_tx_queues;
2238                 } else {
2239                         nb_rxq = rte_port->dev_info.max_rx_queues;
2240                         nb_txq = rte_port->dev_info.max_tx_queues;
2241                 }
2242         } else {
2243                 /* If VT is disabled, use all PF queues */
2244                 if (rte_port->dev_info.vmdq_pool_base == 0) {
2245                         nb_rxq = rte_port->dev_info.max_rx_queues;
2246                         nb_txq = rte_port->dev_info.max_tx_queues;
2247                 } else {
2248                         nb_rxq = (queueid_t)num_tcs;
2249                         nb_txq = (queueid_t)num_tcs;
2250
2251                 }
2252         }
2253         rx_free_thresh = 64;
2254
2255         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2256
2257         rxtx_port_config(rte_port);
2258         /* VLAN filter */
2259         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2260         for (i = 0; i < RTE_DIM(vlan_tags); i++)
2261                 rx_vft_set(pid, vlan_tags[i], 1);
2262
2263         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2264         map_port_queue_stats_mapping_registers(pid, rte_port);
2265
2266         rte_port->dcb_flag = 1;
2267
2268         return 0;
2269 }
2270
2271 static void
2272 init_port(void)
2273 {
2274         /* Configuration of Ethernet ports. */
2275         ports = rte_zmalloc("testpmd: ports",
2276                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2277                             RTE_CACHE_LINE_SIZE);
2278         if (ports == NULL) {
2279                 rte_exit(EXIT_FAILURE,
2280                                 "rte_zmalloc(%d struct rte_port) failed\n",
2281                                 RTE_MAX_ETHPORTS);
2282         }
2283 }
2284
2285 static void
2286 force_quit(void)
2287 {
2288         pmd_test_exit();
2289         prompt_exit();
2290 }
2291
2292 static void
2293 print_stats(void)
2294 {
2295         uint8_t i;
2296         const char clr[] = { 27, '[', '2', 'J', '\0' };
2297         const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2298
2299         /* Clear screen and move to top left */
2300         printf("%s%s", clr, top_left);
2301
2302         printf("\nPort statistics ====================================");
2303         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2304                 nic_stats_display(fwd_ports_ids[i]);
2305 }
2306
2307 static void
2308 signal_handler(int signum)
2309 {
2310         if (signum == SIGINT || signum == SIGTERM) {
2311                 printf("\nSignal %d received, preparing to exit...\n",
2312                                 signum);
2313 #ifdef RTE_LIBRTE_PDUMP
2314                 /* uninitialize packet capture framework */
2315                 rte_pdump_uninit();
2316 #endif
2317 #ifdef RTE_LIBRTE_LATENCY_STATS
2318                 rte_latencystats_uninit();
2319 #endif
2320                 force_quit();
2321                 /* exit with the expected status */
2322                 signal(signum, SIG_DFL);
2323                 kill(getpid(), signum);
2324         }
2325 }
2326
2327 int
2328 main(int argc, char** argv)
2329 {
2330         int  diag;
2331         uint8_t port_id;
2332
2333         signal(SIGINT, signal_handler);
2334         signal(SIGTERM, signal_handler);
2335
2336         diag = rte_eal_init(argc, argv);
2337         if (diag < 0)
2338                 rte_panic("Cannot init EAL\n");
2339
2340 #ifdef RTE_LIBRTE_PDUMP
2341         /* initialize packet capture framework */
2342         rte_pdump_init(NULL);
2343 #endif
2344
2345         nb_ports = (portid_t) rte_eth_dev_count();
2346         if (nb_ports == 0)
2347                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2348
2349         /* allocate port structures, and init them */
2350         init_port();
2351
2352         set_def_fwd_config();
2353         if (nb_lcores == 0)
2354                 rte_panic("Empty set of forwarding logical cores - check the "
2355                           "core mask supplied in the command parameters\n");
2356
2357         /* Bitrate/latency stats disabled by default */
2358 #ifdef RTE_LIBRTE_BITRATE
2359         bitrate_enabled = 0;
2360 #endif
2361 #ifdef RTE_LIBRTE_LATENCY_STATS
2362         latencystats_enabled = 0;
2363 #endif
2364
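        /*
         * rte_eal_init() returned the number of arguments it consumed, so the
         * remaining argc/argv are testpmd's own options.
         */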
2365         argc -= diag;
2366         argv += diag;
2367         if (argc > 1)
2368                 launch_args_parse(argc, argv);
2369
2370         if (tx_first && interactive)
2371                 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2372                                 "interactive mode.\n");
2373
2374         if (tx_first && lsc_interrupt) {
2375                 printf("Warning: lsc_interrupt needs to be off when "
2376                                 "using tx_first. Disabling.\n");
2377                 lsc_interrupt = 0;
2378         }
2379
2380         if (!nb_rxq && !nb_txq)
2381                 printf("Warning: Either rx or tx queues should be non-zero\n");
2382
2383         if (nb_rxq > 1 && nb_rxq > nb_txq)
2384                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2385                        "but nb_txq=%d will prevent it from being fully tested.\n",
2386                        nb_rxq, nb_txq);
2387
2388         init_config();
2389         if (start_port(RTE_PORT_ALL) != 0)
2390                 rte_exit(EXIT_FAILURE, "Failed to start ports\n");
2391
2392         /* set all ports to promiscuous mode by default */
2393         RTE_ETH_FOREACH_DEV(port_id)
2394                 rte_eth_promiscuous_enable(port_id);
2395
2396         /* Init metrics library */
2397         rte_metrics_init(rte_socket_id());
2398
2399 #ifdef RTE_LIBRTE_LATENCY_STATS
2400         if (latencystats_enabled != 0) {
2401                 int ret = rte_latencystats_init(1, NULL);
2402                 if (ret)
2403                         printf("Warning: latencystats init()"
2404                                 " returned error %d\n", ret);
2405                 printf("Latencystats running on lcore %d\n",
2406                         latencystats_lcore_id);
2407         }
2408 #endif
2409
2410         /* Setup bitrate stats */
2411 #ifdef RTE_LIBRTE_BITRATE
2412         if (bitrate_enabled != 0) {
2413                 bitrate_data = rte_stats_bitrate_create();
2414                 if (bitrate_data == NULL)
2415                         rte_exit(EXIT_FAILURE,
2416                                 "Could not allocate bitrate data.\n");
2417                 rte_stats_bitrate_reg(bitrate_data);
2418         }
2419 #endif
2420
2421 #ifdef RTE_LIBRTE_CMDLINE
2422         if (strlen(cmdline_filename) != 0)
2423                 cmdline_read_from_file(cmdline_filename);
2424
2425         if (interactive == 1) {
2426                 if (auto_start) {
2427                         printf("Start automatic packet forwarding\n");
2428                         start_packet_forwarding(0);
2429                 }
2430                 prompt();
2431                 pmd_test_exit();
2432         } else
2433 #endif
2434         {
2435                 char c;
2436                 int rc;
2437
2438                 printf("No command line given, starting packet forwarding\n");
2439                 start_packet_forwarding(tx_first);
2440                 if (stats_period != 0) {
2441                         uint64_t prev_time = 0, cur_time, diff_time = 0;
2442                         uint64_t timer_period;
2443
2444                         /* Convert to number of cycles */
2445                         timer_period = stats_period * rte_get_timer_hz();
2446
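                        /*
                         * Print the accumulated statistics every stats_period
                         * seconds, checking roughly once per second.
                         */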
2447                         while (1) {
2448                                 cur_time = rte_get_timer_cycles();
2449                                 diff_time += cur_time - prev_time;
2450
2451                                 if (diff_time >= timer_period) {
2452                                         print_stats();
2453                                         /* Reset the timer */
2454                                         diff_time = 0;
2455                                 }
2456                                 /* Sleep to avoid unnecessary checks */
2457                                 prev_time = cur_time;
2458                                 sleep(1);
2459                         }
2460                 }
2461
2462                 printf("Press enter to exit\n");
2463                 rc = read(0, &c, 1);
2464                 pmd_test_exit();
2465                 if (rc < 0)
2466                         return 1;
2467         }
2468
2469         return 0;
2470 }