app/testpmd: avoid pages being swapped out
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/mman.h>
42 #include <sys/types.h>
43 #include <errno.h>
44
45 #include <sys/queue.h>
46 #include <sys/stat.h>
47
48 #include <stdint.h>
49 #include <unistd.h>
50 #include <inttypes.h>
51
52 #include <rte_common.h>
53 #include <rte_errno.h>
54 #include <rte_byteorder.h>
55 #include <rte_log.h>
56 #include <rte_debug.h>
57 #include <rte_cycles.h>
58 #include <rte_memory.h>
59 #include <rte_memcpy.h>
60 #include <rte_memzone.h>
61 #include <rte_launch.h>
62 #include <rte_eal.h>
63 #include <rte_alarm.h>
64 #include <rte_per_lcore.h>
65 #include <rte_lcore.h>
66 #include <rte_atomic.h>
67 #include <rte_branch_prediction.h>
68 #include <rte_mempool.h>
69 #include <rte_malloc.h>
70 #include <rte_mbuf.h>
71 #include <rte_interrupts.h>
72 #include <rte_pci.h>
73 #include <rte_ether.h>
74 #include <rte_ethdev.h>
75 #include <rte_dev.h>
76 #include <rte_string_fns.h>
77 #ifdef RTE_LIBRTE_IXGBE_PMD
78 #include <rte_pmd_ixgbe.h>
79 #endif
80 #ifdef RTE_LIBRTE_PDUMP
81 #include <rte_pdump.h>
82 #endif
83 #include <rte_flow.h>
84 #include <rte_metrics.h>
85 #ifdef RTE_LIBRTE_BITRATE
86 #include <rte_bitrate.h>
87 #endif
88 #ifdef RTE_LIBRTE_LATENCY_STATS
89 #include <rte_latencystats.h>
90 #endif
91 #include <rte_gro.h>
92
93 #include "testpmd.h"
94
95 uint16_t verbose_level = 0; /**< Silent by default. */
96
97 /* use master core for command line ? */
98 uint8_t interactive = 0;
99 uint8_t auto_start = 0;
100 uint8_t tx_first;
101 char cmdline_filename[PATH_MAX] = {0};
102
103 /*
104  * NUMA support configuration.
105  * When set, the NUMA support attempts to dispatch the allocation of the
106  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
107  * probed ports among the CPU sockets 0 and 1.
108  * Otherwise, all memory is allocated from CPU socket 0.
109  */
110 uint8_t numa_support = 1; /**< numa enabled by default */
111
112 /*
113  * In UMA mode,all memory is allocated from socket 0 if --socket-num is
114  * not configured.
115  */
116 uint8_t socket_num = UMA_NO_CONFIG;
117
118 /*
119  * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
120  */
121 uint8_t mp_anon = 0;
122
123 /*
124  * Record the Ethernet address of peer target ports to which packets are
125  * forwarded.
126  * Must be instantiated with the ethernet addresses of peer traffic generator
127  * ports.
128  */
129 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
130 portid_t nb_peer_eth_addrs = 0;
131
132 /*
133  * Probed Target Environment.
134  */
135 struct rte_port *ports;        /**< For all probed ethernet ports. */
136 portid_t nb_ports;             /**< Number of probed ethernet ports. */
137 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
138 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
139
140 /*
141  * Test Forwarding Configuration.
142  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
143  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
144  */
145 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
146 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
147 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
148 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
149
150 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
151 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
152
153 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
154 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
155
156 /*
157  * Forwarding engines.
158  */
/* NULL-terminated table of all available forwarding engines;
 * cur_fwd_eng (below) points at one entry of this table. */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine, /* only built with IEEE1588 support enabled */
#endif
	NULL, /* end-of-table sentinel */
};
173
174 struct fwd_config cur_fwd_config;
175 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
176 uint32_t retry_enabled;
177 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
178 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
179
180 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
181 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
182                                       * specified on command-line. */
183 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
184 /*
185  * Configuration of packet segments used by the "txonly" processing engine.
186  */
187 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
188 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
189         TXONLY_DEF_PACKET_LEN,
190 };
191 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
192
193 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
194 /**< Split policy for packets to TX. */
195
196 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
197 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
198
199 /* current configuration is in DCB or not,0 means it is not in DCB mode */
200 uint8_t dcb_config = 0;
201
202 /* Whether the dcb is in testing status */
203 uint8_t dcb_test = 0;
204
205 /*
206  * Configurable number of RX/TX queues.
207  */
208 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
209 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
210
211 /*
212  * Configurable number of RX/TX ring descriptors.
213  */
214 #define RTE_TEST_RX_DESC_DEFAULT 128
215 #define RTE_TEST_TX_DESC_DEFAULT 512
216 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
217 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
218
219 #define RTE_PMD_PARAM_UNSET -1
220 /*
221  * Configurable values of RX and TX ring threshold registers.
222  */
223
224 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
225 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
226 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
227
228 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
229 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
230 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
231
232 /*
233  * Configurable value of RX free threshold.
234  */
235 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
236
237 /*
238  * Configurable value of RX drop enable.
239  */
240 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
241
242 /*
243  * Configurable value of TX free threshold.
244  */
245 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
246
247 /*
248  * Configurable value of TX RS bit threshold.
249  */
250 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
251
252 /*
253  * Configurable value of TX queue flags.
254  */
255 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
256
257 /*
258  * Receive Side Scaling (RSS) configuration.
259  */
260 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
261
262 /*
263  * Port topology configuration
264  */
265 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
266
267 /*
268  * Avoids to flush all the RX streams before starts forwarding.
269  */
270 uint8_t no_flush_rx = 0; /* flush by default */
271
272 /*
273  * Flow API isolated mode.
274  */
275 uint8_t flow_isolate_all;
276
277 /*
278  * Avoids to check link status when starting/stopping a port.
279  */
280 uint8_t no_link_check = 0; /* check by default */
281
282 /*
283  * Enable link status change notification
284  */
285 uint8_t lsc_interrupt = 1; /* enabled by default */
286
287 /*
288  * Enable device removal notification.
289  */
290 uint8_t rmv_interrupt = 1; /* enabled by default */
291
292 /*
293  * Display or mask ether events
294  * Default to all events except VF_MBOX
295  */
296 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
297                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
298                             (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
299                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
300                             (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
301                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
302
303 /*
304  * NIC bypass mode configuration options.
305  */
306
307 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
308 /* The NIC bypass watchdog timeout. */
309 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
310 #endif
311
312
313 #ifdef RTE_LIBRTE_LATENCY_STATS
314
315 /*
316  * Set when latency stats is enabled in the commandline
317  */
318 uint8_t latencystats_enabled;
319
320 /*
321  * Lcore ID to serive latency statistics.
322  */
323 lcoreid_t latencystats_lcore_id = -1;
324
325 #endif
326
327 /*
328  * Ethernet device configuration.
329  */
330 struct rte_eth_rxmode rx_mode = {
331         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
332         .split_hdr_size = 0,
333         .header_split   = 0, /**< Header Split disabled. */
334         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
335         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
336         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
337         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
338         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
339         .hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
340 };
341
/* Flow Director configuration: disabled by default (RTE_FDIR_MODE_NONE);
 * the all-ones masks make every bit of the corresponding field significant
 * if a mode is enabled at run time. */
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0, /* VLAN TCI ignored by default */
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127, /* queue index for drop-filter traffic */
};
364
365 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
366
367 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
368 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
369
370 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
371 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
372
373 uint16_t nb_tx_queue_stats_mappings = 0;
374 uint16_t nb_rx_queue_stats_mappings = 0;
375
376 unsigned int num_sockets = 0;
377 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
378
379 #ifdef RTE_LIBRTE_BITRATE
380 /* Bitrate statistics */
381 struct rte_stats_bitrates *bitrate_data;
382 lcoreid_t bitrate_lcore_id;
383 uint8_t bitrate_enabled;
384 #endif
385
386 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
387
388 /* Forward function declarations */
389 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
390 static void check_all_ports_link_status(uint32_t port_mask);
391 static int eth_event_callback(portid_t port_id,
392                               enum rte_eth_event_type type,
393                               void *param, void *ret_param);
394
395 /*
396  * Check if all the ports are started.
397  * If yes, return positive value. If not, return zero.
398  */
399 static int all_ports_started(void);
400
401 /*
402  * Helper function to check if socket is already discovered.
403  * If yes, return positive value. If not, return zero.
404  */
405 int
406 new_socket_id(unsigned int socket_id)
407 {
408         unsigned int i;
409
410         for (i = 0; i < num_sockets; i++) {
411                 if (socket_ids[i] == socket_id)
412                         return 0;
413         }
414         return 1;
415 }
416
417 /*
418  * Setup default configuration.
419  */
420 static void
421 set_default_fwd_lcores_config(void)
422 {
423         unsigned int i;
424         unsigned int nb_lc;
425         unsigned int sock_num;
426
427         nb_lc = 0;
428         for (i = 0; i < RTE_MAX_LCORE; i++) {
429                 sock_num = rte_lcore_to_socket_id(i);
430                 if (new_socket_id(sock_num)) {
431                         if (num_sockets >= RTE_MAX_NUMA_NODES) {
432                                 rte_exit(EXIT_FAILURE,
433                                          "Total sockets greater than %u\n",
434                                          RTE_MAX_NUMA_NODES);
435                         }
436                         socket_ids[num_sockets++] = sock_num;
437                 }
438                 if (!rte_lcore_is_enabled(i))
439                         continue;
440                 if (i == rte_get_master_lcore())
441                         continue;
442                 fwd_lcores_cpuids[nb_lc++] = i;
443         }
444         nb_lcores = (lcoreid_t) nb_lc;
445         nb_cfg_lcores = nb_lcores;
446         nb_fwd_lcores = 1;
447 }
448
449 static void
450 set_def_peer_eth_addrs(void)
451 {
452         portid_t i;
453
454         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
455                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
456                 peer_eth_addrs[i].addr_bytes[5] = i;
457         }
458 }
459
460 static void
461 set_default_fwd_ports_config(void)
462 {
463         portid_t pt_id;
464
465         for (pt_id = 0; pt_id < nb_ports; pt_id++)
466                 fwd_ports_ids[pt_id] = pt_id;
467
468         nb_cfg_ports = nb_ports;
469         nb_fwd_ports = nb_ports;
470 }
471
/* Build the whole default forwarding setup: lcores, peer MACs, ports. */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
479
480 /*
481  * Configuration initialisation done once at init time.
482  */
/*
 * Create the mbuf pool for one NUMA socket.
 * Aborts the whole application via rte_exit() if the pool cannot be built.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	/* Each pool element holds the mbuf header plus its data segment. */
	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		/* Anonymous mapped memory: build the pool step by step
		 * (create empty, populate, then init pool and objects). */
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			/* populate failed: release the half-built pool */
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

err:
	/* Both branches land here; rte_mp == NULL means creation failed. */
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
528
529 /*
530  * Check given socket id is valid or not with NUMA mode,
531  * if valid, return 0, else return -1
532  */
533 static int
534 check_socket_id(const unsigned int socket_id)
535 {
536         static int warning_once = 0;
537
538         if (new_socket_id(socket_id)) {
539                 if (!warning_once && numa_support)
540                         printf("Warning: NUMA should be configured manually by"
541                                " using --port-numa-config and"
542                                " --ring-numa-config parameters along with"
543                                " --numa.\n");
544                 warning_once = 1;
545                 return -1;
546         }
547         return 0;
548 }
549
/*
 * One-time configuration at init: per-lcore contexts, per-port reconfig
 * flags, mbuf pools (per socket when NUMA is enabled), and fwd streams.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);

	/* Start from "no NUMA placement configured" for ports and rings. */
	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			/* Count ports per socket (explicit config wins,
			 * otherwise use the device's own socket). */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* Worst-case sizing: max descriptors on both sides, per-lcore
		 * cache slack, and one extra burst, scaled by max ports. */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		/* One pool per discovered socket. */
		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* Fall back to the socket-0 pool if none is local. */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();
}
661
662
663 void
664 reconfig(portid_t new_port_id, unsigned socket_id)
665 {
666         struct rte_port *port;
667
668         /* Reconfiguration of Ethernet ports. */
669         port = &ports[new_port_id];
670         rte_eth_dev_info_get(new_port_id, &port->dev_info);
671
672         /* set flag to initialize port/queue */
673         port->need_reconfig = 1;
674         port->need_reconfig_queues = 1;
675         port->socket_id = socket_id;
676
677         init_port_config();
678 }
679
680
/*
 * Validate the per-port queue counts, assign each port a socket id, and
 * (re)allocate the forwarding stream table sized nb_ports * max(rxq, txq).
 * Returns 0 on success, -1 on invalid configuration.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			/* Explicit --port-numa-config wins over the
			 * socket reported by the device itself. */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			/* UMA mode: everything on socket 0 unless
			 * --socket-num was given. */
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	/* Nothing to do when the table is already the right size. */
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
761
762 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
763 static void
764 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
765 {
766         unsigned int total_burst;
767         unsigned int nb_burst;
768         unsigned int burst_stats[3];
769         uint16_t pktnb_stats[3];
770         uint16_t nb_pkt;
771         int burst_percent[3];
772
773         /*
774          * First compute the total number of packet bursts and the
775          * two highest numbers of bursts of the same number of packets.
776          */
777         total_burst = 0;
778         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
779         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
780         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
781                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
782                 if (nb_burst == 0)
783                         continue;
784                 total_burst += nb_burst;
785                 if (nb_burst > burst_stats[0]) {
786                         burst_stats[1] = burst_stats[0];
787                         pktnb_stats[1] = pktnb_stats[0];
788                         burst_stats[0] = nb_burst;
789                         pktnb_stats[0] = nb_pkt;
790                 }
791         }
792         if (total_burst == 0)
793                 return;
794         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
795         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
796                burst_percent[0], (int) pktnb_stats[0]);
797         if (burst_stats[0] == total_burst) {
798                 printf("]\n");
799                 return;
800         }
801         if (burst_stats[0] + burst_stats[1] == total_burst) {
802                 printf(" + %d%% of %d pkts]\n",
803                        100 - burst_percent[0], pktnb_stats[1]);
804                 return;
805         }
806         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
807         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
808         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
809                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
810                 return;
811         }
812         printf(" + %d%% of %d pkts + %d%% of others]\n",
813                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
814 }
815 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
816
/*
 * Display the forwarding statistics accumulated for one port.
 *
 * Two layouts are used:
 *  - a compact one when neither RX nor TX queue-stats mapping is enabled
 *    on the port;
 *  - a wider, column-aligned one otherwise, followed by the per-queue
 *    counters of whichever direction(s) have mapping enabled.
 * Checksum error counters are shown only when the csum engine is active.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		/* Compact layout: aggregate counters only. */
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		/* Bad checksum counters are only meaningful in csum mode. */
		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		/* Only clutter the output with error lines when non-zero. */
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		/* Wide layout: right-aligned columns for easier scanning. */
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/* Burst-size histograms for the streams attached to this port. */
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	/* Per-queue counters read from the stats-mapping registers. */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
899
/*
 * Display the statistics of one forwarding stream
 * (RX port/queue -> TX port/queue pair).
 *
 * Streams that saw no traffic at all (no RX, no TX, no drops) are
 * silently skipped to keep the report short.
 */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	/* Nothing happened on this stream: print nothing. */
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode: also show per-stream bad checksum counters */
	if (cur_fwd_eng == &csum_fwd_engine) {
	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/* Burst-size histograms recorded for this stream. */
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
928
929 static void
930 flush_fwd_rx_queues(void)
931 {
932         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
933         portid_t  rxp;
934         portid_t port_id;
935         queueid_t rxq;
936         uint16_t  nb_rx;
937         uint16_t  i;
938         uint8_t   j;
939         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
940         uint64_t timer_period;
941
942         /* convert to number of cycles */
943         timer_period = rte_get_timer_hz(); /* 1 second timeout */
944
945         for (j = 0; j < 2; j++) {
946                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
947                         for (rxq = 0; rxq < nb_rxq; rxq++) {
948                                 port_id = fwd_ports_ids[rxp];
949                                 /**
950                                 * testpmd can stuck in the below do while loop
951                                 * if rte_eth_rx_burst() always returns nonzero
952                                 * packets. So timer is added to exit this loop
953                                 * after 1sec timer expiry.
954                                 */
955                                 prev_tsc = rte_rdtsc();
956                                 do {
957                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
958                                                 pkts_burst, MAX_PKT_BURST);
959                                         for (i = 0; i < nb_rx; i++)
960                                                 rte_pktmbuf_free(pkts_burst[i]);
961
962                                         cur_tsc = rte_rdtsc();
963                                         diff_tsc = cur_tsc - prev_tsc;
964                                         timer_tsc += diff_tsc;
965                                 } while ((nb_rx > 0) &&
966                                         (timer_tsc < timer_period));
967                                 timer_tsc = 0;
968                         }
969                 }
970                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
971         }
972 }
973
/*
 * Forwarding loop executed on one logical core: repeatedly invoke the
 * engine's pkt_fwd callback on every stream assigned to this lcore until
 * fc->stopped becomes non-zero (set by stop_packet_forwarding(), or
 * pre-set by run_one_txonly_burst_on_core() for a single iteration).
 *
 * When RTE_LIBRTE_BITRATE is compiled in, the lcore selected as
 * bitrate_lcore_id recomputes per-port bitrate statistics roughly once
 * per second.  When RTE_LIBRTE_LATENCY_STATS is compiled in, the lcore
 * selected as latencystats_lcore_id updates latency statistics on every
 * loop iteration.
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* Streams handled by this lcore are contiguous in fwd_streams[]. */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		/* Only the designated lcore computes bitrate stats. */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
						idx_port < cnt_ports;
						idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		/* Only the designated lcore updates latency stats. */
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
1018
1019 static int
1020 start_pkt_forward_on_core(void *fwd_arg)
1021 {
1022         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1023                              cur_fwd_config.fwd_eng->packet_fwd);
1024         return 0;
1025 }
1026
1027 /*
1028  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1029  * Used to start communication flows in network loopback test configurations.
1030  */
1031 static int
1032 run_one_txonly_burst_on_core(void *fwd_arg)
1033 {
1034         struct fwd_lcore *fwd_lc;
1035         struct fwd_lcore tmp_lcore;
1036
1037         fwd_lc = (struct fwd_lcore *) fwd_arg;
1038         tmp_lcore = *fwd_lc;
1039         tmp_lcore.stopped = 1;
1040         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1041         return 0;
1042 }
1043
1044 /*
1045  * Launch packet forwarding:
1046  *     - Setup per-port forwarding context.
1047  *     - launch logical cores with their forwarding configuration.
1048  */
1049 static void
1050 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1051 {
1052         port_fwd_begin_t port_fwd_begin;
1053         unsigned int i;
1054         unsigned int lc_id;
1055         int diag;
1056
1057         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1058         if (port_fwd_begin != NULL) {
1059                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1060                         (*port_fwd_begin)(fwd_ports_ids[i]);
1061         }
1062         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1063                 lc_id = fwd_lcores_cpuids[i];
1064                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1065                         fwd_lcores[i]->stopped = 0;
1066                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1067                                                      fwd_lcores[i], lc_id);
1068                         if (diag != 0)
1069                                 printf("launch lcore %u failed - diag=%d\n",
1070                                        lc_id, diag);
1071                 }
1072         }
1073 }
1074
1075 /*
1076  * Launch packet forwarding configuration.
1077  */
1078 void
1079 start_packet_forwarding(int with_tx_first)
1080 {
1081         port_fwd_begin_t port_fwd_begin;
1082         port_fwd_end_t  port_fwd_end;
1083         struct rte_port *port;
1084         unsigned int i;
1085         portid_t   pt_id;
1086         streamid_t sm_id;
1087
1088         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1089                 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1090
1091         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1092                 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1093
1094         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1095                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1096                 (!nb_rxq || !nb_txq))
1097                 rte_exit(EXIT_FAILURE,
1098                         "Either rxq or txq are 0, cannot use %s fwd mode\n",
1099                         cur_fwd_eng->fwd_mode_name);
1100
1101         if (all_ports_started() == 0) {
1102                 printf("Not all ports were started\n");
1103                 return;
1104         }
1105         if (test_done == 0) {
1106                 printf("Packet forwarding already started\n");
1107                 return;
1108         }
1109
1110         if (init_fwd_streams() < 0) {
1111                 printf("Fail from init_fwd_streams()\n");
1112                 return;
1113         }
1114
1115         if(dcb_test) {
1116                 for (i = 0; i < nb_fwd_ports; i++) {
1117                         pt_id = fwd_ports_ids[i];
1118                         port = &ports[pt_id];
1119                         if (!port->dcb_flag) {
1120                                 printf("In DCB mode, all forwarding ports must "
1121                                        "be configured in this mode.\n");
1122                                 return;
1123                         }
1124                 }
1125                 if (nb_fwd_lcores == 1) {
1126                         printf("In DCB mode,the nb forwarding cores "
1127                                "should be larger than 1.\n");
1128                         return;
1129                 }
1130         }
1131         test_done = 0;
1132
1133         if(!no_flush_rx)
1134                 flush_fwd_rx_queues();
1135
1136         fwd_config_setup();
1137         pkt_fwd_config_display(&cur_fwd_config);
1138         rxtx_config_display();
1139
1140         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1141                 pt_id = fwd_ports_ids[i];
1142                 port = &ports[pt_id];
1143                 rte_eth_stats_get(pt_id, &port->stats);
1144                 port->tx_dropped = 0;
1145
1146                 map_port_queue_stats_mapping_registers(pt_id, port);
1147         }
1148         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1149                 fwd_streams[sm_id]->rx_packets = 0;
1150                 fwd_streams[sm_id]->tx_packets = 0;
1151                 fwd_streams[sm_id]->fwd_dropped = 0;
1152                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1153                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1154
1155 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1156                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1157                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1158                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1159                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1160 #endif
1161 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1162                 fwd_streams[sm_id]->core_cycles = 0;
1163 #endif
1164         }
1165         if (with_tx_first) {
1166                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1167                 if (port_fwd_begin != NULL) {
1168                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1169                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1170                 }
1171                 while (with_tx_first--) {
1172                         launch_packet_forwarding(
1173                                         run_one_txonly_burst_on_core);
1174                         rte_eal_mp_wait_lcore();
1175                 }
1176                 port_fwd_end = tx_only_engine.port_fwd_end;
1177                 if (port_fwd_end != NULL) {
1178                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1179                                 (*port_fwd_end)(fwd_ports_ids[i]);
1180                 }
1181         }
1182         launch_packet_forwarding(start_pkt_forward_on_core);
1183 }
1184
/*
 * Stop a running packet forwarding session and report its statistics.
 *
 * Tells every forwarding lcore to stop, waits for them, lets the engine
 * run its per-port teardown hook, folds the per-stream software counters
 * into the per-port counters, then prints per-stream (when there are
 * more streams than ports), per-port and accumulated statistics.  HW
 * counters are reported relative to the baseline snapshot taken in
 * start_packet_forwarding().
 */
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	/* Signal every forwarding loop to exit, then wait for them all. */
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	/* Let the engine run its per-port teardown hook, if any. */
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		/* With more streams than ports, per-stream stats are shown
		 * separately and the port<->stream links are cleared;
		 * otherwise each port is linked to its single stream so
		 * fwd_port_stats_display() can show burst stats.
		 */
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		/* Fold per-stream software counters into the ports. */
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		/* Subtract the baseline snapshot so only traffic from this
		 * run is reported, and zero the snapshot afterwards.
		 */
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	/* Mark forwarding as stopped so a new run may be started. */
	test_done = 1;
}
1320
1321 void
1322 dev_set_link_up(portid_t pid)
1323 {
1324         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1325                 printf("\nSet link up fail.\n");
1326 }
1327
1328 void
1329 dev_set_link_down(portid_t pid)
1330 {
1331         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1332                 printf("\nSet link down fail.\n");
1333 }
1334
1335 static int
1336 all_ports_started(void)
1337 {
1338         portid_t pi;
1339         struct rte_port *port;
1340
1341         RTE_ETH_FOREACH_DEV(pi) {
1342                 port = &ports[pi];
1343                 /* Check if there is a port which is not started */
1344                 if ((port->port_status != RTE_PORT_STARTED) &&
1345                         (port->slave_flag == 0))
1346                         return 0;
1347         }
1348
1349         /* No port is not started */
1350         return 1;
1351 }
1352
1353 int
1354 all_ports_stopped(void)
1355 {
1356         portid_t pi;
1357         struct rte_port *port;
1358
1359         RTE_ETH_FOREACH_DEV(pi) {
1360                 port = &ports[pi];
1361                 if ((port->port_status != RTE_PORT_STOPPED) &&
1362                         (port->slave_flag == 0))
1363                         return 0;
1364         }
1365
1366         return 1;
1367 }
1368
1369 int
1370 port_is_started(portid_t port_id)
1371 {
1372         if (port_id_is_invalid(port_id, ENABLED_WARN))
1373                 return 0;
1374
1375         if (ports[port_id].port_status != RTE_PORT_STARTED)
1376                 return 0;
1377
1378         return 1;
1379 }
1380
1381 static int
1382 port_is_closed(portid_t port_id)
1383 {
1384         if (port_id_is_invalid(port_id, ENABLED_WARN))
1385                 return 0;
1386
1387         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1388                 return 0;
1389
1390         return 1;
1391 }
1392
/*
 * Start the given port (or all ports when pid == RTE_PORT_ALL).
 *
 * For each selected, currently-stopped port: (re)configure the device and
 * its RX/TX queues when flagged as needing it, register event callbacks,
 * start the device, and move its status to RTE_PORT_STARTED.  The status
 * transitions are done with atomic compare-and-set through the
 * intermediate RTE_PORT_HANDLING state.
 *
 * Returns 0 on success (including "nothing to do"), -1 on a fatal
 * configuration error; configuration failures re-arm the need_reconfig /
 * need_reconfig_queues flags so a retry is possible.
 */
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if(dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		/* Claim the port: only a STOPPED port may be started. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues: prefer the per-port NUMA node
			 * configured for TX rings when NUMA support is on.
			 */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues: each RX queue also needs a mbuf
			 * pool on the chosen NUMA node.
			 */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,rxring_numa[pi],
					     &(port->rx_conf),mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,port->socket_id,
					     &(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		/* Register eth_event_callback for every event type.
		 * NOTE(review): the message below says "even callback";
		 * likely a typo for "event callback" — confirm and fix in a
		 * dedicated change.
		 */
		for (event_type = RTE_ETH_EVENT_UNKNOWN;
		     event_type < RTE_ETH_EVENT_MAX;
		     event_type++) {
			diag = rte_eth_dev_callback_register(pi,
							event_type,
							eth_event_callback,
							NULL);
			if (diag) {
				printf("Failed to setup even callback for event %d\n",
					event_type);
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Device start failed: roll status back to stopped. */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	/* need_check_link_status stays -1 only when no port matched pid. */
	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
1571
/*
 * Stop one port, or every port when pid == RTE_PORT_ALL.
 *
 * Ports still referenced by the forwarding configuration or enslaved to
 * a bonded device are skipped with a message.  Each port's status is
 * driven STARTED -> HANDLING -> STOPPED with atomic compare-and-set so
 * that a concurrent state change is detected and reported.  After at
 * least one port was stopped, link statuses are re-checked unless link
 * checking is disabled.
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	/* Leaving DCB test mode also clears the DCB configuration flag. */
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* Only ports currently in the STARTED state are stopped. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
1620
/*
 * Close one port, or every port when pid == RTE_PORT_ALL.
 *
 * A port must already be in the STOPPED state; ports still used for
 * forwarding or enslaved to a bonded device are skipped.  Flow rules
 * created through the rte_flow API are flushed before the device is
 * closed.  The status moves STOPPED -> HANDLING -> CLOSED via atomic
 * compare-and-set so concurrent state changes are detected.
 */
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* cmpset with identical old/new values is a pure atomic read. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		/* Remove any remaining rte_flow rules before the device goes away. */
		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
1670
1671 void
1672 reset_port(portid_t pid)
1673 {
1674         int diag;
1675         portid_t pi;
1676         struct rte_port *port;
1677
1678         if (port_id_is_invalid(pid, ENABLED_WARN))
1679                 return;
1680
1681         printf("Resetting ports...\n");
1682
1683         RTE_ETH_FOREACH_DEV(pi) {
1684                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1685                         continue;
1686
1687                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1688                         printf("Please remove port %d from forwarding "
1689                                "configuration.\n", pi);
1690                         continue;
1691                 }
1692
1693                 if (port_is_bonding_slave(pi)) {
1694                         printf("Please remove port %d from bonded device.\n",
1695                                pi);
1696                         continue;
1697                 }
1698
1699                 diag = rte_eth_dev_reset(pi);
1700                 if (diag == 0) {
1701                         port = &ports[pi];
1702                         port->need_reconfig = 1;
1703                         port->need_reconfig_queues = 1;
1704                 } else {
1705                         printf("Failed to reset port %d. diag=%d\n", pi, diag);
1706                 }
1707         }
1708
1709         printf("Done\n");
1710 }
1711
1712 void
1713 attach_port(char *identifier)
1714 {
1715         portid_t pi = 0;
1716         unsigned int socket_id;
1717
1718         printf("Attaching a new port...\n");
1719
1720         if (identifier == NULL) {
1721                 printf("Invalid parameters are specified\n");
1722                 return;
1723         }
1724
1725         if (rte_eth_dev_attach(identifier, &pi))
1726                 return;
1727
1728         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1729         /* if socket_id is invalid, set to 0 */
1730         if (check_socket_id(socket_id) < 0)
1731                 socket_id = 0;
1732         reconfig(pi, socket_id);
1733         rte_eth_promiscuous_enable(pi);
1734
1735         nb_ports = rte_eth_dev_count();
1736
1737         ports[pi].port_status = RTE_PORT_STOPPED;
1738
1739         printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1740         printf("Done\n");
1741 }
1742
1743 void
1744 detach_port(uint8_t port_id)
1745 {
1746         char name[RTE_ETH_NAME_MAX_LEN];
1747
1748         printf("Detaching a port...\n");
1749
1750         if (!port_is_closed(port_id)) {
1751                 printf("Please close port first\n");
1752                 return;
1753         }
1754
1755         if (ports[port_id].flow_list)
1756                 port_flow_flush(port_id);
1757
1758         if (rte_eth_dev_detach(port_id, name)) {
1759                 RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1760                 return;
1761         }
1762
1763         nb_ports = rte_eth_dev_count();
1764
1765         printf("Port '%s' is detached. Now total ports is %d\n",
1766                         name, nb_ports);
1767         printf("Done\n");
1768         return;
1769 }
1770
1771 void
1772 pmd_test_exit(void)
1773 {
1774         portid_t pt_id;
1775
1776         if (test_done == 0)
1777                 stop_packet_forwarding();
1778
1779         if (ports != NULL) {
1780                 no_link_check = 1;
1781                 RTE_ETH_FOREACH_DEV(pt_id) {
1782                         printf("\nShutting down port %d...\n", pt_id);
1783                         fflush(stdout);
1784                         stop_port(pt_id);
1785                         close_port(pt_id);
1786                 }
1787         }
1788         printf("\nBye...\n");
1789 }
1790
/* Prototype of a test-menu command handler: no arguments, no result. */
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;	/* command string entered by the user */
	cmd_func_t cmd_func;	/* handler invoked for that command */
};

/* Number of entries in the pmd_test_menu[] command table. */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1798
1799 /* Check the link status of all ports in up to 9s, and print them finally */
1800 static void
1801 check_all_ports_link_status(uint32_t port_mask)
1802 {
1803 #define CHECK_INTERVAL 100 /* 100ms */
1804 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1805         portid_t portid;
1806         uint8_t count, all_ports_up, print_flag = 0;
1807         struct rte_eth_link link;
1808
1809         printf("Checking link statuses...\n");
1810         fflush(stdout);
1811         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1812                 all_ports_up = 1;
1813                 RTE_ETH_FOREACH_DEV(portid) {
1814                         if ((port_mask & (1 << portid)) == 0)
1815                                 continue;
1816                         memset(&link, 0, sizeof(link));
1817                         rte_eth_link_get_nowait(portid, &link);
1818                         /* print link status if flag set */
1819                         if (print_flag == 1) {
1820                                 if (link.link_status)
1821                                         printf(
1822                                         "Port%d Link Up. speed %u Mbps- %s\n",
1823                                         portid, link.link_speed,
1824                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1825                                         ("full-duplex") : ("half-duplex\n"));
1826                                 else
1827                                         printf("Port %d Link Down\n", portid);
1828                                 continue;
1829                         }
1830                         /* clear all_ports_up flag if any link down */
1831                         if (link.link_status == ETH_LINK_DOWN) {
1832                                 all_ports_up = 0;
1833                                 break;
1834                         }
1835                 }
1836                 /* after finally printing all link status, get out */
1837                 if (print_flag == 1)
1838                         break;
1839
1840                 if (all_ports_up == 0) {
1841                         fflush(stdout);
1842                         rte_delay_ms(CHECK_INTERVAL);
1843                 }
1844
1845                 /* set the print_flag if all ports up or timeout */
1846                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1847                         print_flag = 1;
1848                 }
1849
1850                 if (lsc_interrupt)
1851                         break;
1852         }
1853 }
1854
/*
 * Deferred handler for an RTE_ETH_EVENT_INTR_RMV (device removal)
 * event, scheduled via rte_eal_alarm_set() by eth_event_callback() so
 * it runs outside the interrupt thread.  Stops and closes the removed
 * port, then detaches the underlying EAL device.  "arg" carries the
 * port id cast through intptr_t.
 */
static void
rmv_event_callback(void *arg)
{
	struct rte_eth_dev *dev;
	uint8_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	stop_port(port_id);
	close_port(port_id);
	printf("removing device %s\n", dev->device->name);
	if (rte_eal_dev_detach(dev->device))
		RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
			dev->device->name);
}
1871
/* This function is used by the interrupt thread */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		  void *ret_param)
{
	/* Human-readable event names indexed by rte_eth_event_type. */
	static const char * const event_desc[] = {
		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
		[RTE_ETH_EVENT_MACSEC] = "MACsec",
		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
		[RTE_ETH_EVENT_MAX] = NULL,
	};

	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	/* Print the event only when its bit is set in event_print_mask. */
	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu8 ": %s event\n", port_id,
			event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_INTR_RMV:
		/* Device removal must not run in the interrupt thread
		 * itself: defer the teardown by 100ms via an EAL alarm.
		 */
		if (rte_eal_alarm_set(100000,
				rmv_event_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
	return 0;
}
1912
1913 static int
1914 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1915 {
1916         uint16_t i;
1917         int diag;
1918         uint8_t mapping_found = 0;
1919
1920         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1921                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1922                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1923                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1924                                         tx_queue_stats_mappings[i].queue_id,
1925                                         tx_queue_stats_mappings[i].stats_counter_id);
1926                         if (diag != 0)
1927                                 return diag;
1928                         mapping_found = 1;
1929                 }
1930         }
1931         if (mapping_found)
1932                 port->tx_queue_stats_mapping_enabled = 1;
1933         return 0;
1934 }
1935
1936 static int
1937 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1938 {
1939         uint16_t i;
1940         int diag;
1941         uint8_t mapping_found = 0;
1942
1943         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1944                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1945                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1946                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1947                                         rx_queue_stats_mappings[i].queue_id,
1948                                         rx_queue_stats_mappings[i].stats_counter_id);
1949                         if (diag != 0)
1950                                 return diag;
1951                         mapping_found = 1;
1952                 }
1953         }
1954         if (mapping_found)
1955                 port->rx_queue_stats_mapping_enabled = 1;
1956         return 0;
1957 }
1958
/*
 * Apply both the TX and the RX queue-stats mappings to port "pi".
 * -ENOTSUP from the PMD is tolerated (the feature is simply disabled
 * for that direction); any other error is fatal and aborts testpmd.
 */
static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		}
		else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		}
		else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}
1990
1991 static void
1992 rxtx_port_config(struct rte_port *port)
1993 {
1994         port->rx_conf = port->dev_info.default_rxconf;
1995         port->tx_conf = port->dev_info.default_txconf;
1996
1997         /* Check if any RX/TX parameters have been passed */
1998         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1999                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2000
2001         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2002                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2003
2004         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2005                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2006
2007         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2008                 port->rx_conf.rx_free_thresh = rx_free_thresh;
2009
2010         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2011                 port->rx_conf.rx_drop_en = rx_drop_en;
2012
2013         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2014                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2015
2016         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2017                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2018
2019         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2020                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2021
2022         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2023                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2024
2025         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2026                 port->tx_conf.tx_free_thresh = tx_free_thresh;
2027
2028         if (txq_flags != RTE_PMD_PARAM_UNSET)
2029                 port->tx_conf.txq_flags = txq_flags;
2030 }
2031
/*
 * Build the default configuration of every probed port: RX mode, flow
 * director settings, RSS (enabled only when several RX queues exist),
 * per-queue thresholds, queue-stats mappings and the LSC/RMV interrupt
 * flags when the device advertises support for them.
 */
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		/* RSS only makes sense with more than one RX queue. */
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		/* Outside DCB mode, pick plain RSS or no multi-queue at all. */
		if (port->dcb_flag == 0) {
			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		/* Enable LSC/RMV interrupts only when both requested on the
		 * command line and supported by the device.
		 */
		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}
2076
2077 void set_port_slave_flag(portid_t slave_pid)
2078 {
2079         struct rte_port *port;
2080
2081         port = &ports[slave_pid];
2082         port->slave_flag = 1;
2083 }
2084
2085 void clear_port_slave_flag(portid_t slave_pid)
2086 {
2087         struct rte_port *port;
2088
2089         port = &ports[slave_pid];
2090         port->slave_flag = 0;
2091 }
2092
2093 uint8_t port_is_bonding_slave(portid_t slave_pid)
2094 {
2095         struct rte_port *port;
2096
2097         port = &ports[slave_pid];
2098         return port->slave_flag;
2099 }
2100
/* VLAN tags used to populate the VMDQ+DCB pool map in DCB/VT mode
 * (see get_eth_dcb_conf()); one tag per VMDQ pool-map entry.
 */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
2107
/*
 * Fill "eth_conf" for DCB operation.
 *
 * In DCB_VT_ENABLED mode the configuration combines VMDQ and DCB: RX
 * pools are mapped from the vlan_tags[] table and each user priority is
 * mapped 1:1 to a traffic class.  Otherwise plain DCB is configured
 * (with RSS on the RX side) and the user priorities are spread
 * round-robin over "num_tcs" traffic classes.  "pfc_en" adds priority
 * flow control to the advertised DCB capabilities.  Always returns 0.
 */
static  int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
                 enum dcb_mode_enable dcb_mode,
                 enum rte_eth_nb_tcs num_tcs,
                 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		/* 4 TCs leave room for 32 pools, 8 TCs only for 16. */
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		/* One pool-map entry per pool, keyed by vlan_tags[]. */
		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		/* Map each user priority 1:1 onto a traffic class. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		/* Distribute user priorities round-robin over the TCs. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
2174
/*
 * Switch port "pid" to DCB mode.
 *
 * Builds a DCB (or VMDQ+DCB) rte_eth_conf via get_eth_dcb_conf(),
 * applies it with zero queues (queues are set up later), derives the
 * number of RX/TX queues from the device capabilities, enables VLAN
 * filtering for all vlan_tags[] entries and flags the port as DCB.
 * Returns 0 on success, a negative value on configuration error.
 */
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.hw_vlan_filter = 1;

	/**
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be setup.
	 */
	rte_eth_dev_configure(pid, 0, 0, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and has the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			/* SR-IOV active: queues already partitioned per VF. */
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/*if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			/* One queue per traffic class. */
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;

		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
2256
2257 static void
2258 init_port(void)
2259 {
2260         /* Configuration of Ethernet ports. */
2261         ports = rte_zmalloc("testpmd: ports",
2262                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2263                             RTE_CACHE_LINE_SIZE);
2264         if (ports == NULL) {
2265                 rte_exit(EXIT_FAILURE,
2266                                 "rte_zmalloc(%d struct rte_port) failed\n",
2267                                 RTE_MAX_ETHPORTS);
2268         }
2269 }
2270
/* Shut down all ports and terminate the interactive prompt; invoked
 * from the SIGINT/SIGTERM handler.
 */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
2277
2278 static void
2279 print_stats(void)
2280 {
2281         uint8_t i;
2282         const char clr[] = { 27, '[', '2', 'J', '\0' };
2283         const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2284
2285         /* Clear screen and move to top left */
2286         printf("%s%s", clr, top_left);
2287
2288         printf("\nPort statistics ====================================");
2289         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2290                 nic_stats_display(fwd_ports_ids[i]);
2291 }
2292
/*
 * SIGINT/SIGTERM handler: release packet-capture and latency-stats
 * resources, shut everything down via force_quit(), then re-raise the
 * signal with the default disposition so the process terminates with
 * the status a signal death is expected to produce.
 */
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		force_quit();
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
2312
2313 int
2314 main(int argc, char** argv)
2315 {
2316         int  diag;
2317         portid_t port_id;
2318
2319         signal(SIGINT, signal_handler);
2320         signal(SIGTERM, signal_handler);
2321
2322         diag = rte_eal_init(argc, argv);
2323         if (diag < 0)
2324                 rte_panic("Cannot init EAL\n");
2325
2326         if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2327                 RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
2328                         strerror(errno));
2329         }
2330
2331 #ifdef RTE_LIBRTE_PDUMP
2332         /* initialize packet capture framework */
2333         rte_pdump_init(NULL);
2334 #endif
2335
2336         nb_ports = (portid_t) rte_eth_dev_count();
2337         if (nb_ports == 0)
2338                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2339
2340         /* allocate port structures, and init them */
2341         init_port();
2342
2343         set_def_fwd_config();
2344         if (nb_lcores == 0)
2345                 rte_panic("Empty set of forwarding logical cores - check the "
2346                           "core mask supplied in the command parameters\n");
2347
        /* NOTE(review): this is the tail of main(); the function header and the
         * rte_eal_init() call that set `diag` are above this chunk. The leading
         * "2345"-style numbers on each line are viewer artifacts, not code. */
2348         /* Bitrate/latency stats disabled by default */
2349 #ifdef RTE_LIBRTE_BITRATE
2350         bitrate_enabled = 0;
2351 #endif
2352 #ifdef RTE_LIBRTE_LATENCY_STATS
2353         latencystats_enabled = 0;
2354 #endif
2355
        /* Skip the argv entries already consumed by EAL initialization —
         * `diag` is presumably the return value of rte_eal_init(); confirm
         * against the call above this chunk. Remaining args are testpmd's. */
2356         argc -= diag;
2357         argv += diag;
2358         if (argc > 1)
2359                 launch_args_parse(argc, argv);
2360
        /* Option compatibility checks: --tx-first is a hard error with -i,
         * but with LSC interrupts it is only downgraded to a warning. */
2361         if (tx_first && interactive)
2362                 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
2363                                 "interactive mode.\n");
2364
2365         if (tx_first && lsc_interrupt) {
                /* NOTE(review): the two concatenated literals produce a doubled
                 * space ("off when  using"); fixing it would change runtime
                 * output, so it is only flagged here. */
2366                 printf("Warning: lsc_interrupt needs to be off when "
2367                                 " using tx_first. Disabling.\n");
2368                 lsc_interrupt = 0;
2369         }
2370
2371         if (!nb_rxq && !nb_txq)
2372                 printf("Warning: Either rx or tx queues should be non-zero\n");
2373
2374         if (nb_rxq > 1 && nb_rxq > nb_txq)
2375                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2376                        "but nb_txq=%d will prevent to fully test it.\n",
2377                        nb_rxq, nb_txq);
2378
        /* Configure then start every detected port; a start failure on any
         * port aborts the whole application. */
2379         init_config();
2380         if (start_port(RTE_PORT_ALL) != 0)
2381                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2382
2383         /* set all ports to promiscuous mode by default */
2384         RTE_ETH_FOREACH_DEV(port_id)
2385                 rte_eth_promiscuous_enable(port_id);
2386
2387         /* Init metrics library */
2388         rte_metrics_init(rte_socket_id());
2389
2390 #ifdef RTE_LIBRTE_LATENCY_STATS
        /* Latency stats init failure is non-fatal: warn and continue. */
2391         if (latencystats_enabled != 0) {
2392                 int ret = rte_latencystats_init(1, NULL);
2393                 if (ret)
2394                         printf("Warning: latencystats init()"
2395                                 " returned error %d\n", ret);
2396                 printf("Latencystats running on lcore %d\n",
2397                         latencystats_lcore_id);
2398         }
2399 #endif
2400
2401         /* Setup bitrate stats */
2402 #ifdef RTE_LIBRTE_BITRATE
        /* Unlike latency stats, a bitrate allocation failure is fatal. */
2403         if (bitrate_enabled != 0) {
2404                 bitrate_data = rte_stats_bitrate_create();
2405                 if (bitrate_data == NULL)
2406                         rte_exit(EXIT_FAILURE,
2407                                 "Could not allocate bitrate data.\n");
2408                 rte_stats_bitrate_reg(bitrate_data);
2409         }
2410 #endif
2411
2412 #ifdef RTE_LIBRTE_CMDLINE
        /* Optionally replay a command file, then either drop into the
         * interactive prompt (-i) or fall through to batch mode below. */
2413         if (strlen(cmdline_filename) != 0)
2414                 cmdline_read_from_file(cmdline_filename);
2415
2416         if (interactive == 1) {
2417                 if (auto_start) {
2418                         printf("Start automatic packet forwarding\n");
2419                         start_packet_forwarding(0);
2420                 }
2421                 prompt();
2422                 pmd_test_exit();
2423         } else
2424 #endif
        /* Non-interactive (batch) mode: forward until stdin delivers a byte,
         * optionally printing periodic statistics. Without RTE_LIBRTE_CMDLINE
         * this brace block is unconditional. */
2425         {
2426                 char c;
2427                 int rc;
2428
2429                 printf("No commandline core given, start packet forwarding\n");
2430                 start_packet_forwarding(tx_first);
2431                 if (stats_period != 0) {
2432                         uint64_t prev_time = 0, cur_time, diff_time = 0;
2433                         uint64_t timer_period;
2434
2435                         /* Convert to number of cycles */
2436                         timer_period = stats_period * rte_get_timer_hz();
2437
                        /* NOTE(review): prev_time starts at 0, so the first
                         * diff_time equals the raw TSC value and the first
                         * print_stats() fires immediately — likely intended
                         * as "print once up front", but worth confirming.
                         * This loop never exits; in stats-period mode the
                         * process runs until externally terminated. */
2438                         while (1) {
2439                                 cur_time = rte_get_timer_cycles();
2440                                 diff_time += cur_time - prev_time;
2441
2442                                 if (diff_time >= timer_period) {
2443                                         print_stats();
2444                                         /* Reset the timer */
2445                                         diff_time = 0;
2446                                 }
2447                                 /* Sleep to avoid unnecessary checks */
2448                                 prev_time = cur_time;
2449                                 sleep(1);
2450                         }
2451                 }
2452
                /* Block on stdin; any byte (e.g. Enter) triggers shutdown.
                 * A failed read() still shuts down but returns nonzero. */
2453                 printf("Press enter to exit\n");
2454                 rc = read(0, &c, 1);
2455                 pmd_test_exit();
2456                 if (rc < 0)
2457                         return 1;
2458         }
2459
2460         return 0;
2461 }