app/testpmd: register a specific log type
[dpdk.git] / app / test-pmd / testpmd.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <errno.h>
15
16 #include <sys/queue.h>
17 #include <sys/stat.h>
18
19 #include <stdint.h>
20 #include <unistd.h>
21 #include <inttypes.h>
22
23 #include <rte_common.h>
24 #include <rte_errno.h>
25 #include <rte_byteorder.h>
26 #include <rte_log.h>
27 #include <rte_debug.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
30 #include <rte_memcpy.h>
31 #include <rte_launch.h>
32 #include <rte_eal.h>
33 #include <rte_alarm.h>
34 #include <rte_per_lcore.h>
35 #include <rte_lcore.h>
36 #include <rte_atomic.h>
37 #include <rte_branch_prediction.h>
38 #include <rte_mempool.h>
39 #include <rte_malloc.h>
40 #include <rte_mbuf.h>
41 #include <rte_interrupts.h>
42 #include <rte_pci.h>
43 #include <rte_ether.h>
44 #include <rte_ethdev.h>
45 #include <rte_dev.h>
46 #include <rte_string_fns.h>
47 #ifdef RTE_LIBRTE_IXGBE_PMD
48 #include <rte_pmd_ixgbe.h>
49 #endif
50 #ifdef RTE_LIBRTE_PDUMP
51 #include <rte_pdump.h>
52 #endif
53 #include <rte_flow.h>
54 #include <rte_metrics.h>
55 #ifdef RTE_LIBRTE_BITRATE
56 #include <rte_bitrate.h>
57 #endif
58 #ifdef RTE_LIBRTE_LATENCY_STATS
59 #include <rte_latencystats.h>
60 #endif
61
62 #include "testpmd.h"
63
64 uint16_t verbose_level = 0; /**< Silent by default. */
65 int testpmd_logtype; /**< Log type for testpmd logs */
66
67 /* use master core for command line ? */
68 uint8_t interactive = 0;
69 uint8_t auto_start = 0;
70 uint8_t tx_first;
71 char cmdline_filename[PATH_MAX] = {0};
72
73 /*
74  * NUMA support configuration.
75  * When set, the NUMA support attempts to dispatch the allocation of the
76  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
77  * probed ports among the CPU sockets 0 and 1.
78  * Otherwise, all memory is allocated from CPU socket 0.
79  */
80 uint8_t numa_support = 1; /**< numa enabled by default */
81
82 /*
83  * In UMA mode,all memory is allocated from socket 0 if --socket-num is
84  * not configured.
85  */
86 uint8_t socket_num = UMA_NO_CONFIG;
87
88 /*
89  * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
90  */
91 uint8_t mp_anon = 0;
92
93 /*
94  * Record the Ethernet address of peer target ports to which packets are
95  * forwarded.
96  * Must be instantiated with the ethernet addresses of peer traffic generator
97  * ports.
98  */
99 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
100 portid_t nb_peer_eth_addrs = 0;
101
102 /*
103  * Probed Target Environment.
104  */
105 struct rte_port *ports;        /**< For all probed ethernet ports. */
106 portid_t nb_ports;             /**< Number of probed ethernet ports. */
107 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
108 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
109
110 /*
111  * Test Forwarding Configuration.
112  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
113  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
114  */
115 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
116 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
117 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
118 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
119
120 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
121 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
122
123 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
124 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
125
126 /*
127  * Forwarding engines.
128  */
129 struct fwd_engine * fwd_engines[] = {
130         &io_fwd_engine,
131         &mac_fwd_engine,
132         &mac_swap_engine,
133         &flow_gen_engine,
134         &rx_only_engine,
135         &tx_only_engine,
136         &csum_fwd_engine,
137         &icmp_echo_engine,
138 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
139         &softnic_tm_engine,
140         &softnic_tm_bypass_engine,
141 #endif
142 #ifdef RTE_LIBRTE_IEEE1588
143         &ieee1588_fwd_engine,
144 #endif
145         NULL,
146 };
147
148 struct fwd_config cur_fwd_config;
149 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
150 uint32_t retry_enabled;
151 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
152 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
153
154 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
155 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
156                                       * specified on command-line. */
157 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
158
159 /*
160  * In container, it cannot terminate the process which running with 'stats-period'
161  * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
162  */
163 uint8_t f_quit;
164
165 /*
166  * Configuration of packet segments used by the "txonly" processing engine.
167  */
168 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
169 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
170         TXONLY_DEF_PACKET_LEN,
171 };
172 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
173
174 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
175 /**< Split policy for packets to TX. */
176
177 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
178 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
179
180 /* current configuration is in DCB or not,0 means it is not in DCB mode */
181 uint8_t dcb_config = 0;
182
183 /* Whether the dcb is in testing status */
184 uint8_t dcb_test = 0;
185
186 /*
187  * Configurable number of RX/TX queues.
188  */
189 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
190 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
191
192 /*
193  * Configurable number of RX/TX ring descriptors.
194  */
195 #define RTE_TEST_RX_DESC_DEFAULT 128
196 #define RTE_TEST_TX_DESC_DEFAULT 512
197 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
198 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
199
200 #define RTE_PMD_PARAM_UNSET -1
201 /*
202  * Configurable values of RX and TX ring threshold registers.
203  */
204
205 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
206 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
207 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
208
209 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
210 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
211 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
212
213 /*
214  * Configurable value of RX free threshold.
215  */
216 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
217
218 /*
219  * Configurable value of RX drop enable.
220  */
221 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
222
223 /*
224  * Configurable value of TX free threshold.
225  */
226 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
227
228 /*
229  * Configurable value of TX RS bit threshold.
230  */
231 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
232
233 /*
234  * Configurable value of TX queue flags.
235  */
236 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
237
238 /*
239  * Receive Side Scaling (RSS) configuration.
240  */
241 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
242
243 /*
244  * Port topology configuration
245  */
246 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
247
248 /*
249  * Avoids to flush all the RX streams before starts forwarding.
250  */
251 uint8_t no_flush_rx = 0; /* flush by default */
252
253 /*
254  * Flow API isolated mode.
255  */
256 uint8_t flow_isolate_all;
257
258 /*
259  * Avoids to check link status when starting/stopping a port.
260  */
261 uint8_t no_link_check = 0; /* check by default */
262
263 /*
264  * Enable link status change notification
265  */
266 uint8_t lsc_interrupt = 1; /* enabled by default */
267
268 /*
269  * Enable device removal notification.
270  */
271 uint8_t rmv_interrupt = 1; /* enabled by default */
272
273 /*
274  * Display or mask ether events
275  * Default to all events except VF_MBOX
276  */
277 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
278                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
279                             (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
280                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
281                             (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
282                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
283
284 /*
285  * NIC bypass mode configuration options.
286  */
287
288 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
289 /* The NIC bypass watchdog timeout. */
290 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
291 #endif
292
293
294 #ifdef RTE_LIBRTE_LATENCY_STATS
295
296 /*
297  * Set when latency stats is enabled in the commandline
298  */
299 uint8_t latencystats_enabled;
300
301 /*
302  * Lcore ID to serive latency statistics.
303  */
304 lcoreid_t latencystats_lcore_id = -1;
305
306 #endif
307
308 /*
309  * Ethernet device configuration.
310  */
311 struct rte_eth_rxmode rx_mode = {
312         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
313         .split_hdr_size = 0,
314         .header_split   = 0, /**< Header Split disabled. */
315         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
316         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
317         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
318         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
319         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
320         .hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
321         .hw_timestamp   = 0, /**< HW timestamp enabled. */
322 };
323
324 struct rte_fdir_conf fdir_conf = {
325         .mode = RTE_FDIR_MODE_NONE,
326         .pballoc = RTE_FDIR_PBALLOC_64K,
327         .status = RTE_FDIR_REPORT_STATUS,
328         .mask = {
329                 .vlan_tci_mask = 0x0,
330                 .ipv4_mask     = {
331                         .src_ip = 0xFFFFFFFF,
332                         .dst_ip = 0xFFFFFFFF,
333                 },
334                 .ipv6_mask     = {
335                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
336                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
337                 },
338                 .src_port_mask = 0xFFFF,
339                 .dst_port_mask = 0xFFFF,
340                 .mac_addr_byte_mask = 0xFF,
341                 .tunnel_type_mask = 1,
342                 .tunnel_id_mask = 0xFFFFFFFF,
343         },
344         .drop_queue = 127,
345 };
346
347 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
348
349 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
350 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
351
352 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
353 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
354
355 uint16_t nb_tx_queue_stats_mappings = 0;
356 uint16_t nb_rx_queue_stats_mappings = 0;
357
358 /*
359  * Display zero values by default for xstats
360  */
361 uint8_t xstats_hide_zero;
362
363 unsigned int num_sockets = 0;
364 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
365
366 #ifdef RTE_LIBRTE_BITRATE
367 /* Bitrate statistics */
368 struct rte_stats_bitrates *bitrate_data;
369 lcoreid_t bitrate_lcore_id;
370 uint8_t bitrate_enabled;
371 #endif
372
373 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
374 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
375
376 /* Forward function declarations */
377 static void map_port_queue_stats_mapping_registers(portid_t pi,
378                                                    struct rte_port *port);
379 static void check_all_ports_link_status(uint32_t port_mask);
380 static int eth_event_callback(portid_t port_id,
381                               enum rte_eth_event_type type,
382                               void *param, void *ret_param);
383
384 /*
385  * Check if all the ports are started.
386  * If yes, return positive value. If not, return zero.
387  */
388 static int all_ports_started(void);
389
390 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
391 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
392
393 /*
394  * Helper function to check if socket is already discovered.
395  * If yes, return positive value. If not, return zero.
396  */
397 int
398 new_socket_id(unsigned int socket_id)
399 {
400         unsigned int i;
401
402         for (i = 0; i < num_sockets; i++) {
403                 if (socket_ids[i] == socket_id)
404                         return 0;
405         }
406         return 1;
407 }
408
409 /*
410  * Setup default configuration.
411  */
412 static void
413 set_default_fwd_lcores_config(void)
414 {
415         unsigned int i;
416         unsigned int nb_lc;
417         unsigned int sock_num;
418
419         nb_lc = 0;
420         for (i = 0; i < RTE_MAX_LCORE; i++) {
421                 sock_num = rte_lcore_to_socket_id(i);
422                 if (new_socket_id(sock_num)) {
423                         if (num_sockets >= RTE_MAX_NUMA_NODES) {
424                                 rte_exit(EXIT_FAILURE,
425                                          "Total sockets greater than %u\n",
426                                          RTE_MAX_NUMA_NODES);
427                         }
428                         socket_ids[num_sockets++] = sock_num;
429                 }
430                 if (!rte_lcore_is_enabled(i))
431                         continue;
432                 if (i == rte_get_master_lcore())
433                         continue;
434                 fwd_lcores_cpuids[nb_lc++] = i;
435         }
436         nb_lcores = (lcoreid_t) nb_lc;
437         nb_cfg_lcores = nb_lcores;
438         nb_fwd_lcores = 1;
439 }
440
441 static void
442 set_def_peer_eth_addrs(void)
443 {
444         portid_t i;
445
446         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
447                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
448                 peer_eth_addrs[i].addr_bytes[5] = i;
449         }
450 }
451
452 static void
453 set_default_fwd_ports_config(void)
454 {
455         portid_t pt_id;
456         int i = 0;
457
458         RTE_ETH_FOREACH_DEV(pt_id)
459                 fwd_ports_ids[i++] = pt_id;
460
461         nb_cfg_ports = nb_ports;
462         nb_fwd_ports = nb_ports;
463 }
464
/*
 * Install the default forwarding configuration: forwarding lcores,
 * peer Ethernet addresses and forwarded ports.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
472
473 /*
474  * Configuration initialisation done once at init time.
475  */
476 static void
477 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
478                  unsigned int socket_id)
479 {
480         char pool_name[RTE_MEMPOOL_NAMESIZE];
481         struct rte_mempool *rte_mp = NULL;
482         uint32_t mb_size;
483
484         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
485         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
486
487         TESTPMD_LOG(INFO,
488                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
489                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
490
491         if (mp_anon != 0) {
492                 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
493                         mb_size, (unsigned) mb_mempool_cache,
494                         sizeof(struct rte_pktmbuf_pool_private),
495                         socket_id, 0);
496                 if (rte_mp == NULL)
497                         goto err;
498
499                 if (rte_mempool_populate_anon(rte_mp) == 0) {
500                         rte_mempool_free(rte_mp);
501                         rte_mp = NULL;
502                         goto err;
503                 }
504                 rte_pktmbuf_pool_init(rte_mp, NULL);
505                 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
506         } else {
507                 /* wrapper to rte_mempool_create() */
508                 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
509                         mb_mempool_cache, 0, mbuf_seg_size, socket_id);
510         }
511
512 err:
513         if (rte_mp == NULL) {
514                 rte_exit(EXIT_FAILURE,
515                         "Creation of mbuf pool for socket %u failed: %s\n",
516                         socket_id, rte_strerror(rte_errno));
517         } else if (verbose_level > 0) {
518                 rte_mempool_dump(stdout, rte_mp);
519         }
520 }
521
522 /*
523  * Check given socket id is valid or not with NUMA mode,
524  * if valid, return 0, else return -1
525  */
526 static int
527 check_socket_id(const unsigned int socket_id)
528 {
529         static int warning_once = 0;
530
531         if (new_socket_id(socket_id)) {
532                 if (!warning_once && numa_support)
533                         printf("Warning: NUMA should be configured manually by"
534                                " using --port-numa-config and"
535                                " --ring-numa-config parameters along with"
536                                " --numa.\n");
537                 warning_once = 1;
538                 return -1;
539         }
540         return 0;
541 }
542
/*
 * One-time initialisation of the testpmd configuration: per-lcore
 * contexts, per-port device info, mbuf pools, forwarding streams and
 * the GRO/GSO contexts.  Exits the process on any allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;

	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);

	/* Reset per-port / per-ring NUMA placement to "not configured". */
	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		/* NOTE(review): port_per_socket is filled here but never
		 * read later in this function — confirm it is still needed.
		 */
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* Worst-case sizing: max RX+TX descriptors plus per-lcore
		 * cache and one burst, scaled by the maximum port count. */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		/* One pool per discovered CPU socket. */
		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* Fall back to the socket-0 pool when the lcore's socket
		 * has no pool of its own. */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	/* NOTE(review): max_flow_num is set from GRO_MAX_FLUSH_CYCLES —
	 * the name mismatch suggests checking whether a dedicated
	 * GRO_MAX_FLOW_NUM constant was intended here. */
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}
679
680
681 void
682 reconfig(portid_t new_port_id, unsigned socket_id)
683 {
684         struct rte_port *port;
685
686         /* Reconfiguration of Ethernet ports. */
687         port = &ports[new_port_id];
688         rte_eth_dev_info_get(new_port_id, &port->dev_info);
689
690         /* set flag to initialize port/queue */
691         port->need_reconfig = 1;
692         port->need_reconfig_queues = 1;
693         port->socket_id = socket_id;
694
695         init_port_config();
696 }
697
698
699 int
700 init_fwd_streams(void)
701 {
702         portid_t pid;
703         struct rte_port *port;
704         streamid_t sm_id, nb_fwd_streams_new;
705         queueid_t q;
706
707         /* set socket id according to numa or not */
708         RTE_ETH_FOREACH_DEV(pid) {
709                 port = &ports[pid];
710                 if (nb_rxq > port->dev_info.max_rx_queues) {
711                         printf("Fail: nb_rxq(%d) is greater than "
712                                 "max_rx_queues(%d)\n", nb_rxq,
713                                 port->dev_info.max_rx_queues);
714                         return -1;
715                 }
716                 if (nb_txq > port->dev_info.max_tx_queues) {
717                         printf("Fail: nb_txq(%d) is greater than "
718                                 "max_tx_queues(%d)\n", nb_txq,
719                                 port->dev_info.max_tx_queues);
720                         return -1;
721                 }
722                 if (numa_support) {
723                         if (port_numa[pid] != NUMA_NO_CONFIG)
724                                 port->socket_id = port_numa[pid];
725                         else {
726                                 port->socket_id = rte_eth_dev_socket_id(pid);
727
728                                 /* if socket_id is invalid, set to 0 */
729                                 if (check_socket_id(port->socket_id) < 0)
730                                         port->socket_id = 0;
731                         }
732                 }
733                 else {
734                         if (socket_num == UMA_NO_CONFIG)
735                                 port->socket_id = 0;
736                         else
737                                 port->socket_id = socket_num;
738                 }
739         }
740
741         q = RTE_MAX(nb_rxq, nb_txq);
742         if (q == 0) {
743                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
744                 return -1;
745         }
746         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
747         if (nb_fwd_streams_new == nb_fwd_streams)
748                 return 0;
749         /* clear the old */
750         if (fwd_streams != NULL) {
751                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
752                         if (fwd_streams[sm_id] == NULL)
753                                 continue;
754                         rte_free(fwd_streams[sm_id]);
755                         fwd_streams[sm_id] = NULL;
756                 }
757                 rte_free(fwd_streams);
758                 fwd_streams = NULL;
759         }
760
761         /* init new */
762         nb_fwd_streams = nb_fwd_streams_new;
763         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
764                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
765         if (fwd_streams == NULL)
766                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
767                                                 "failed\n", nb_fwd_streams);
768
769         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
770                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
771                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
772                 if (fwd_streams[sm_id] == NULL)
773                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
774                                                                 " failed\n");
775         }
776
777         return 0;
778 }
779
780 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
781 static void
782 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
783 {
784         unsigned int total_burst;
785         unsigned int nb_burst;
786         unsigned int burst_stats[3];
787         uint16_t pktnb_stats[3];
788         uint16_t nb_pkt;
789         int burst_percent[3];
790
791         /*
792          * First compute the total number of packet bursts and the
793          * two highest numbers of bursts of the same number of packets.
794          */
795         total_burst = 0;
796         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
797         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
798         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
799                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
800                 if (nb_burst == 0)
801                         continue;
802                 total_burst += nb_burst;
803                 if (nb_burst > burst_stats[0]) {
804                         burst_stats[1] = burst_stats[0];
805                         pktnb_stats[1] = pktnb_stats[0];
806                         burst_stats[0] = nb_burst;
807                         pktnb_stats[0] = nb_pkt;
808                 }
809         }
810         if (total_burst == 0)
811                 return;
812         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
813         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
814                burst_percent[0], (int) pktnb_stats[0]);
815         if (burst_stats[0] == total_burst) {
816                 printf("]\n");
817                 return;
818         }
819         if (burst_stats[0] + burst_stats[1] == total_burst) {
820                 printf(" + %d%% of %d pkts]\n",
821                        100 - burst_percent[0], pktnb_stats[1]);
822                 return;
823         }
824         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
825         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
826         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
827                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
828                 return;
829         }
830         printf(" + %d%% of %d pkts + %d%% of others]\n",
831                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
832 }
833 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
834
/*
 * Print the forward statistics accumulated for one port.
 *
 * port_id: index into the global ports[] array.
 * stats:   ethdev statistics for the port, already delta-adjusted by the
 *          caller (stop_packet_forwarding()).
 *
 * Two layouts are used: a compact one when neither RX nor TX queue stats
 * mapping is enabled on the port, and a wide column-aligned one otherwise.
 * Per-queue stats registers are dumped only for mapped queues.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		/* compact layout */
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		/* the csum engine maintains per-port bad-checksum counters */
		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		/* wide, column-aligned layout used with queue stats mapping */
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/* dump RX/TX burst size histograms if a stream is attached */
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
917
918 static void
919 fwd_stream_stats_display(streamid_t stream_id)
920 {
921         struct fwd_stream *fs;
922         static const char *fwd_top_stats_border = "-------";
923
924         fs = fwd_streams[stream_id];
925         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
926             (fs->fwd_dropped == 0))
927                 return;
928         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
929                "TX Port=%2d/Queue=%2d %s\n",
930                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
931                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
932         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
933                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
934
935         /* if checksum mode */
936         if (cur_fwd_eng == &csum_fwd_engine) {
937                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
938                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
939         }
940
941 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
942         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
943         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
944 #endif
945 }
946
947 static void
948 flush_fwd_rx_queues(void)
949 {
950         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
951         portid_t  rxp;
952         portid_t port_id;
953         queueid_t rxq;
954         uint16_t  nb_rx;
955         uint16_t  i;
956         uint8_t   j;
957         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
958         uint64_t timer_period;
959
960         /* convert to number of cycles */
961         timer_period = rte_get_timer_hz(); /* 1 second timeout */
962
963         for (j = 0; j < 2; j++) {
964                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
965                         for (rxq = 0; rxq < nb_rxq; rxq++) {
966                                 port_id = fwd_ports_ids[rxp];
967                                 /**
968                                 * testpmd can stuck in the below do while loop
969                                 * if rte_eth_rx_burst() always returns nonzero
970                                 * packets. So timer is added to exit this loop
971                                 * after 1sec timer expiry.
972                                 */
973                                 prev_tsc = rte_rdtsc();
974                                 do {
975                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
976                                                 pkts_burst, MAX_PKT_BURST);
977                                         for (i = 0; i < nb_rx; i++)
978                                                 rte_pktmbuf_free(pkts_burst[i]);
979
980                                         cur_tsc = rte_rdtsc();
981                                         diff_tsc = cur_tsc - prev_tsc;
982                                         timer_tsc += diff_tsc;
983                                 } while ((nb_rx > 0) &&
984                                         (timer_tsc < timer_period));
985                                 timer_tsc = 0;
986                         }
987                 }
988                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
989         }
990 }
991
/*
 * Main forwarding loop of one logical core: invoke the engine's packet
 * forwarding callback on every stream assigned to this lcore, until the
 * lcore is told to stop via fc->stopped.  When enabled at build time,
 * the designated lcores also drive the periodic bitrate and latency
 * statistics updates from inside the loop.
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* the streams handled by this lcore form a contiguous slice */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		/* bitrate stats are recomputed once per second,
		 * only on the designated lcore */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
						idx_port < cnt_ports;
						idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		/* latency stats are updated only on the designated lcore */
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
1036
1037 static int
1038 start_pkt_forward_on_core(void *fwd_arg)
1039 {
1040         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1041                              cur_fwd_config.fwd_eng->packet_fwd);
1042         return 0;
1043 }
1044
1045 /*
1046  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1047  * Used to start communication flows in network loopback test configurations.
1048  */
1049 static int
1050 run_one_txonly_burst_on_core(void *fwd_arg)
1051 {
1052         struct fwd_lcore *fwd_lc;
1053         struct fwd_lcore tmp_lcore;
1054
1055         fwd_lc = (struct fwd_lcore *) fwd_arg;
1056         tmp_lcore = *fwd_lc;
1057         tmp_lcore.stopped = 1;
1058         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1059         return 0;
1060 }
1061
1062 /*
1063  * Launch packet forwarding:
1064  *     - Setup per-port forwarding context.
1065  *     - launch logical cores with their forwarding configuration.
1066  */
1067 static void
1068 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1069 {
1070         port_fwd_begin_t port_fwd_begin;
1071         unsigned int i;
1072         unsigned int lc_id;
1073         int diag;
1074
1075         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1076         if (port_fwd_begin != NULL) {
1077                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1078                         (*port_fwd_begin)(fwd_ports_ids[i]);
1079         }
1080         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1081                 lc_id = fwd_lcores_cpuids[i];
1082                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1083                         fwd_lcores[i]->stopped = 0;
1084                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1085                                                      fwd_lcores[i], lc_id);
1086                         if (diag != 0)
1087                                 printf("launch lcore %u failed - diag=%d\n",
1088                                        lc_id, diag);
1089                 }
1090         }
1091 }
1092
/*
 * Launch packet forwarding configuration.
 *
 * with_tx_first: number of initial TXONLY bursts to send before switching
 * to the configured forwarding engine (0 skips the TX-first phase).
 *
 * Validates the current forwarding mode against the rxq/txq configuration,
 * checks that all ports are started and no run is already in progress,
 * resets per-port and per-stream statistics, then launches the forwarding
 * lcores.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	/* sanity-check forwarding mode against the queue configuration */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done != 0 means no forwarding run is currently active */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	/* DCB mode requires every forwarding port to be DCB-configured
	 * and more than one forwarding core */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if(!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	/* snapshot current port stats so the run's delta can be reported
	 * by stop_packet_forwarding() */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* clear all per-stream software counters */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		/* run the TXONLY engine for the requested number of bursts,
		 * waiting for all lcores after each one */
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1202
/*
 * Stop the current packet forwarding run and display its statistics.
 *
 * Signals every forwarding lcore to stop, waits for them to finish,
 * invokes the engine's per-port end callback, folds the per-stream
 * software counters into the per-port counters, then prints per-stream,
 * per-port and accumulated statistics (deltas since the run started).
 */
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		/* with more streams than ports, show per-stream stats and
		 * detach the stream pointers; otherwise attach them so
		 * fwd_port_stats_display() can dump burst stats per port */
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		/* fold per-stream software counters into the port counters */
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		/* subtract the snapshot taken at start_packet_forwarding()
		 * so only this run's traffic is reported */
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	/* mark the run as finished so a new one may be started */
	test_done = 1;
}
1340
1341 void
1342 dev_set_link_up(portid_t pid)
1343 {
1344         if (rte_eth_dev_set_link_up(pid) < 0)
1345                 printf("\nSet link up fail.\n");
1346 }
1347
1348 void
1349 dev_set_link_down(portid_t pid)
1350 {
1351         if (rte_eth_dev_set_link_down(pid) < 0)
1352                 printf("\nSet link down fail.\n");
1353 }
1354
1355 static int
1356 all_ports_started(void)
1357 {
1358         portid_t pi;
1359         struct rte_port *port;
1360
1361         RTE_ETH_FOREACH_DEV(pi) {
1362                 port = &ports[pi];
1363                 /* Check if there is a port which is not started */
1364                 if ((port->port_status != RTE_PORT_STARTED) &&
1365                         (port->slave_flag == 0))
1366                         return 0;
1367         }
1368
1369         /* No port is not started */
1370         return 1;
1371 }
1372
1373 int
1374 all_ports_stopped(void)
1375 {
1376         portid_t pi;
1377         struct rte_port *port;
1378
1379         RTE_ETH_FOREACH_DEV(pi) {
1380                 port = &ports[pi];
1381                 if ((port->port_status != RTE_PORT_STOPPED) &&
1382                         (port->slave_flag == 0))
1383                         return 0;
1384         }
1385
1386         return 1;
1387 }
1388
1389 int
1390 port_is_started(portid_t port_id)
1391 {
1392         if (port_id_is_invalid(port_id, ENABLED_WARN))
1393                 return 0;
1394
1395         if (ports[port_id].port_status != RTE_PORT_STARTED)
1396                 return 0;
1397
1398         return 1;
1399 }
1400
1401 static int
1402 port_is_closed(portid_t port_id)
1403 {
1404         if (port_id_is_invalid(port_id, ENABLED_WARN))
1405                 return 0;
1406
1407         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1408                 return 0;
1409
1410         return 1;
1411 }
1412
1413 int
1414 start_port(portid_t pid)
1415 {
1416         int diag, need_check_link_status = -1;
1417         portid_t pi;
1418         queueid_t qi;
1419         struct rte_port *port;
1420         struct ether_addr mac_addr;
1421         enum rte_eth_event_type event_type;
1422
1423         if (port_id_is_invalid(pid, ENABLED_WARN))
1424                 return 0;
1425
1426         if(dcb_config)
1427                 dcb_test = 1;
1428         RTE_ETH_FOREACH_DEV(pi) {
1429                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1430                         continue;
1431
1432                 need_check_link_status = 0;
1433                 port = &ports[pi];
1434                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1435                                                  RTE_PORT_HANDLING) == 0) {
1436                         printf("Port %d is now not stopped\n", pi);
1437                         continue;
1438                 }
1439
1440                 if (port->need_reconfig > 0) {
1441                         port->need_reconfig = 0;
1442
1443                         if (flow_isolate_all) {
1444                                 int ret = port_flow_isolate(pi, 1);
1445                                 if (ret) {
1446                                         printf("Failed to apply isolated"
1447                                                " mode on port %d\n", pi);
1448                                         return -1;
1449                                 }
1450                         }
1451
1452                         printf("Configuring Port %d (socket %u)\n", pi,
1453                                         port->socket_id);
1454                         /* configure port */
1455                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1456                                                 &(port->dev_conf));
1457                         if (diag != 0) {
1458                                 if (rte_atomic16_cmpset(&(port->port_status),
1459                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1460                                         printf("Port %d can not be set back "
1461                                                         "to stopped\n", pi);
1462                                 printf("Fail to configure port %d\n", pi);
1463                                 /* try to reconfigure port next time */
1464                                 port->need_reconfig = 1;
1465                                 return -1;
1466                         }
1467                 }
1468                 if (port->need_reconfig_queues > 0) {
1469                         port->need_reconfig_queues = 0;
1470                         /* setup tx queues */
1471                         for (qi = 0; qi < nb_txq; qi++) {
1472                                 if ((numa_support) &&
1473                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1474                                         diag = rte_eth_tx_queue_setup(pi, qi,
1475                                                 nb_txd,txring_numa[pi],
1476                                                 &(port->tx_conf));
1477                                 else
1478                                         diag = rte_eth_tx_queue_setup(pi, qi,
1479                                                 nb_txd,port->socket_id,
1480                                                 &(port->tx_conf));
1481
1482                                 if (diag == 0)
1483                                         continue;
1484
1485                                 /* Fail to setup tx queue, return */
1486                                 if (rte_atomic16_cmpset(&(port->port_status),
1487                                                         RTE_PORT_HANDLING,
1488                                                         RTE_PORT_STOPPED) == 0)
1489                                         printf("Port %d can not be set back "
1490                                                         "to stopped\n", pi);
1491                                 printf("Fail to configure port %d tx queues\n", pi);
1492                                 /* try to reconfigure queues next time */
1493                                 port->need_reconfig_queues = 1;
1494                                 return -1;
1495                         }
1496                         /* setup rx queues */
1497                         for (qi = 0; qi < nb_rxq; qi++) {
1498                                 if ((numa_support) &&
1499                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1500                                         struct rte_mempool * mp =
1501                                                 mbuf_pool_find(rxring_numa[pi]);
1502                                         if (mp == NULL) {
1503                                                 printf("Failed to setup RX queue:"
1504                                                         "No mempool allocation"
1505                                                         " on the socket %d\n",
1506                                                         rxring_numa[pi]);
1507                                                 return -1;
1508                                         }
1509
1510                                         diag = rte_eth_rx_queue_setup(pi, qi,
1511                                              nb_rxd,rxring_numa[pi],
1512                                              &(port->rx_conf),mp);
1513                                 } else {
1514                                         struct rte_mempool *mp =
1515                                                 mbuf_pool_find(port->socket_id);
1516                                         if (mp == NULL) {
1517                                                 printf("Failed to setup RX queue:"
1518                                                         "No mempool allocation"
1519                                                         " on the socket %d\n",
1520                                                         port->socket_id);
1521                                                 return -1;
1522                                         }
1523                                         diag = rte_eth_rx_queue_setup(pi, qi,
1524                                              nb_rxd,port->socket_id,
1525                                              &(port->rx_conf), mp);
1526                                 }
1527                                 if (diag == 0)
1528                                         continue;
1529
1530                                 /* Fail to setup rx queue, return */
1531                                 if (rte_atomic16_cmpset(&(port->port_status),
1532                                                         RTE_PORT_HANDLING,
1533                                                         RTE_PORT_STOPPED) == 0)
1534                                         printf("Port %d can not be set back "
1535                                                         "to stopped\n", pi);
1536                                 printf("Fail to configure port %d rx queues\n", pi);
1537                                 /* try to reconfigure queues next time */
1538                                 port->need_reconfig_queues = 1;
1539                                 return -1;
1540                         }
1541                 }
1542
1543                 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1544                      event_type < RTE_ETH_EVENT_MAX;
1545                      event_type++) {
1546                         diag = rte_eth_dev_callback_register(pi,
1547                                                         event_type,
1548                                                         eth_event_callback,
1549                                                         NULL);
1550                         if (diag) {
1551                                 printf("Failed to setup even callback for event %d\n",
1552                                         event_type);
1553                                 return -1;
1554                         }
1555                 }
1556
1557                 /* start port */
1558                 if (rte_eth_dev_start(pi) < 0) {
1559                         printf("Fail to start port %d\n", pi);
1560
1561                         /* Fail to setup rx queue, return */
1562                         if (rte_atomic16_cmpset(&(port->port_status),
1563                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1564                                 printf("Port %d can not be set back to "
1565                                                         "stopped\n", pi);
1566                         continue;
1567                 }
1568
1569                 if (rte_atomic16_cmpset(&(port->port_status),
1570                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1571                         printf("Port %d can not be set into started\n", pi);
1572
1573                 rte_eth_macaddr_get(pi, &mac_addr);
1574                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1575                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1576                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1577                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1578
1579                 /* at least one port started, need checking link status */
1580                 need_check_link_status = 1;
1581         }
1582
1583         if (need_check_link_status == 1 && !no_link_check)
1584                 check_all_ports_link_status(RTE_PORT_ALL);
1585         else if (need_check_link_status == 0)
1586                 printf("Please stop the ports first\n");
1587
1588         printf("Done\n");
1589         return 0;
1590 }
1591
1592 void
1593 stop_port(portid_t pid)
1594 {
1595         portid_t pi;
1596         struct rte_port *port;
1597         int need_check_link_status = 0;
1598
1599         if (dcb_test) {
1600                 dcb_test = 0;
1601                 dcb_config = 0;
1602         }
1603
1604         if (port_id_is_invalid(pid, ENABLED_WARN))
1605                 return;
1606
1607         printf("Stopping ports...\n");
1608
1609         RTE_ETH_FOREACH_DEV(pi) {
1610                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1611                         continue;
1612
1613                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1614                         printf("Please remove port %d from forwarding configuration.\n", pi);
1615                         continue;
1616                 }
1617
1618                 if (port_is_bonding_slave(pi)) {
1619                         printf("Please remove port %d from bonded device.\n", pi);
1620                         continue;
1621                 }
1622
1623                 port = &ports[pi];
1624                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1625                                                 RTE_PORT_HANDLING) == 0)
1626                         continue;
1627
1628                 rte_eth_dev_stop(pi);
1629
1630                 if (rte_atomic16_cmpset(&(port->port_status),
1631                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1632                         printf("Port %d can not be set into stopped\n", pi);
1633                 need_check_link_status = 1;
1634         }
1635         if (need_check_link_status && !no_link_check)
1636                 check_all_ports_link_status(RTE_PORT_ALL);
1637
1638         printf("Done\n");
1639 }
1640
1641 void
1642 close_port(portid_t pid)
1643 {
1644         portid_t pi;
1645         struct rte_port *port;
1646
1647         if (port_id_is_invalid(pid, ENABLED_WARN))
1648                 return;
1649
1650         printf("Closing ports...\n");
1651
1652         RTE_ETH_FOREACH_DEV(pi) {
1653                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1654                         continue;
1655
1656                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1657                         printf("Please remove port %d from forwarding configuration.\n", pi);
1658                         continue;
1659                 }
1660
1661                 if (port_is_bonding_slave(pi)) {
1662                         printf("Please remove port %d from bonded device.\n", pi);
1663                         continue;
1664                 }
1665
1666                 port = &ports[pi];
1667                 if (rte_atomic16_cmpset(&(port->port_status),
1668                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1669                         printf("Port %d is already closed\n", pi);
1670                         continue;
1671                 }
1672
1673                 if (rte_atomic16_cmpset(&(port->port_status),
1674                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1675                         printf("Port %d is now not stopped\n", pi);
1676                         continue;
1677                 }
1678
1679                 if (port->flow_list)
1680                         port_flow_flush(pi);
1681                 rte_eth_dev_close(pi);
1682
1683                 if (rte_atomic16_cmpset(&(port->port_status),
1684                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1685                         printf("Port %d cannot be set to closed\n", pi);
1686         }
1687
1688         printf("Done\n");
1689 }
1690
1691 void
1692 reset_port(portid_t pid)
1693 {
1694         int diag;
1695         portid_t pi;
1696         struct rte_port *port;
1697
1698         if (port_id_is_invalid(pid, ENABLED_WARN))
1699                 return;
1700
1701         printf("Resetting ports...\n");
1702
1703         RTE_ETH_FOREACH_DEV(pi) {
1704                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1705                         continue;
1706
1707                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1708                         printf("Please remove port %d from forwarding "
1709                                "configuration.\n", pi);
1710                         continue;
1711                 }
1712
1713                 if (port_is_bonding_slave(pi)) {
1714                         printf("Please remove port %d from bonded device.\n",
1715                                pi);
1716                         continue;
1717                 }
1718
1719                 diag = rte_eth_dev_reset(pi);
1720                 if (diag == 0) {
1721                         port = &ports[pi];
1722                         port->need_reconfig = 1;
1723                         port->need_reconfig_queues = 1;
1724                 } else {
1725                         printf("Failed to reset port %d. diag=%d\n", pi, diag);
1726                 }
1727         }
1728
1729         printf("Done\n");
1730 }
1731
1732 void
1733 attach_port(char *identifier)
1734 {
1735         portid_t pi = 0;
1736         unsigned int socket_id;
1737
1738         printf("Attaching a new port...\n");
1739
1740         if (identifier == NULL) {
1741                 printf("Invalid parameters are specified\n");
1742                 return;
1743         }
1744
1745         if (rte_eth_dev_attach(identifier, &pi))
1746                 return;
1747
1748         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1749         /* if socket_id is invalid, set to 0 */
1750         if (check_socket_id(socket_id) < 0)
1751                 socket_id = 0;
1752         reconfig(pi, socket_id);
1753         rte_eth_promiscuous_enable(pi);
1754
1755         nb_ports = rte_eth_dev_count();
1756
1757         ports[pi].port_status = RTE_PORT_STOPPED;
1758
1759         printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1760         printf("Done\n");
1761 }
1762
1763 void
1764 detach_port(portid_t port_id)
1765 {
1766         char name[RTE_ETH_NAME_MAX_LEN];
1767
1768         printf("Detaching a port...\n");
1769
1770         if (!port_is_closed(port_id)) {
1771                 printf("Please close port first\n");
1772                 return;
1773         }
1774
1775         if (ports[port_id].flow_list)
1776                 port_flow_flush(port_id);
1777
1778         if (rte_eth_dev_detach(port_id, name)) {
1779                 TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1780                 return;
1781         }
1782
1783         nb_ports = rte_eth_dev_count();
1784
1785         printf("Port '%s' is detached. Now total ports is %d\n",
1786                         name, nb_ports);
1787         printf("Done\n");
1788         return;
1789 }
1790
1791 void
1792 pmd_test_exit(void)
1793 {
1794         portid_t pt_id;
1795
1796         if (test_done == 0)
1797                 stop_packet_forwarding();
1798
1799         if (ports != NULL) {
1800                 no_link_check = 1;
1801                 RTE_ETH_FOREACH_DEV(pt_id) {
1802                         printf("\nShutting down port %d...\n", pt_id);
1803                         fflush(stdout);
1804                         stop_port(pt_id);
1805                         close_port(pt_id);
1806                 }
1807         }
1808         printf("\nBye...\n");
1809 }
1810
/* Signature of a test-menu command handler: no arguments, no result. */
typedef void (*cmd_func_t)(void);
/* Binds a textual command name to its handler function. */
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

/* Element count of a pmd_test_menu command table.
 * NOTE(review): pmd_test_menu is not defined in this part of the file —
 * confirm this macro still has a user.
 */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1818
1819 /* Check the link status of all ports in up to 9s, and print them finally */
1820 static void
1821 check_all_ports_link_status(uint32_t port_mask)
1822 {
1823 #define CHECK_INTERVAL 100 /* 100ms */
1824 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1825         portid_t portid;
1826         uint8_t count, all_ports_up, print_flag = 0;
1827         struct rte_eth_link link;
1828
1829         printf("Checking link statuses...\n");
1830         fflush(stdout);
1831         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1832                 all_ports_up = 1;
1833                 RTE_ETH_FOREACH_DEV(portid) {
1834                         if ((port_mask & (1 << portid)) == 0)
1835                                 continue;
1836                         memset(&link, 0, sizeof(link));
1837                         rte_eth_link_get_nowait(portid, &link);
1838                         /* print link status if flag set */
1839                         if (print_flag == 1) {
1840                                 if (link.link_status)
1841                                         printf(
1842                                         "Port%d Link Up. speed %u Mbps- %s\n",
1843                                         portid, link.link_speed,
1844                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1845                                         ("full-duplex") : ("half-duplex\n"));
1846                                 else
1847                                         printf("Port %d Link Down\n", portid);
1848                                 continue;
1849                         }
1850                         /* clear all_ports_up flag if any link down */
1851                         if (link.link_status == ETH_LINK_DOWN) {
1852                                 all_ports_up = 0;
1853                                 break;
1854                         }
1855                 }
1856                 /* after finally printing all link status, get out */
1857                 if (print_flag == 1)
1858                         break;
1859
1860                 if (all_ports_up == 0) {
1861                         fflush(stdout);
1862                         rte_delay_ms(CHECK_INTERVAL);
1863                 }
1864
1865                 /* set the print_flag if all ports up or timeout */
1866                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1867                         print_flag = 1;
1868                 }
1869
1870                 if (lsc_interrupt)
1871                         break;
1872         }
1873 }
1874
1875 static void
1876 rmv_event_callback(void *arg)
1877 {
1878         struct rte_eth_dev *dev;
1879         portid_t port_id = (intptr_t)arg;
1880
1881         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1882         dev = &rte_eth_devices[port_id];
1883
1884         stop_port(port_id);
1885         close_port(port_id);
1886         printf("removing device %s\n", dev->device->name);
1887         if (rte_eal_dev_detach(dev->device))
1888                 TESTPMD_LOG(ERR, "Failed to detach device %s\n",
1889                         dev->device->name);
1890 }
1891
1892 /* This function is used by the interrupt thread */
1893 static int
1894 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1895                   void *ret_param)
1896 {
1897         static const char * const event_desc[] = {
1898                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1899                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1900                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1901                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1902                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1903                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1904                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1905                 [RTE_ETH_EVENT_MAX] = NULL,
1906         };
1907
1908         RTE_SET_USED(param);
1909         RTE_SET_USED(ret_param);
1910
1911         if (type >= RTE_ETH_EVENT_MAX) {
1912                 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1913                         port_id, __func__, type);
1914                 fflush(stderr);
1915         } else if (event_print_mask & (UINT32_C(1) << type)) {
1916                 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1917                         event_desc[type]);
1918                 fflush(stdout);
1919         }
1920
1921         switch (type) {
1922         case RTE_ETH_EVENT_INTR_RMV:
1923                 if (rte_eal_alarm_set(100000,
1924                                 rmv_event_callback, (void *)(intptr_t)port_id))
1925                         fprintf(stderr, "Could not set up deferred device removal\n");
1926                 break;
1927         default:
1928                 break;
1929         }
1930         return 0;
1931 }
1932
1933 static int
1934 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1935 {
1936         uint16_t i;
1937         int diag;
1938         uint8_t mapping_found = 0;
1939
1940         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1941                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1942                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1943                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1944                                         tx_queue_stats_mappings[i].queue_id,
1945                                         tx_queue_stats_mappings[i].stats_counter_id);
1946                         if (diag != 0)
1947                                 return diag;
1948                         mapping_found = 1;
1949                 }
1950         }
1951         if (mapping_found)
1952                 port->tx_queue_stats_mapping_enabled = 1;
1953         return 0;
1954 }
1955
1956 static int
1957 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1958 {
1959         uint16_t i;
1960         int diag;
1961         uint8_t mapping_found = 0;
1962
1963         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1964                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1965                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1966                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1967                                         rx_queue_stats_mappings[i].queue_id,
1968                                         rx_queue_stats_mappings[i].stats_counter_id);
1969                         if (diag != 0)
1970                                 return diag;
1971                         mapping_found = 1;
1972                 }
1973         }
1974         if (mapping_found)
1975                 port->rx_queue_stats_mapping_enabled = 1;
1976         return 0;
1977 }
1978
1979 static void
1980 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
1981 {
1982         int diag = 0;
1983
1984         diag = set_tx_queue_stats_mapping_registers(pi, port);
1985         if (diag != 0) {
1986                 if (diag == -ENOTSUP) {
1987                         port->tx_queue_stats_mapping_enabled = 0;
1988                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1989                 }
1990                 else
1991                         rte_exit(EXIT_FAILURE,
1992                                         "set_tx_queue_stats_mapping_registers "
1993                                         "failed for port id=%d diag=%d\n",
1994                                         pi, diag);
1995         }
1996
1997         diag = set_rx_queue_stats_mapping_registers(pi, port);
1998         if (diag != 0) {
1999                 if (diag == -ENOTSUP) {
2000                         port->rx_queue_stats_mapping_enabled = 0;
2001                         printf("RX queue stats mapping not supported port id=%d\n", pi);
2002                 }
2003                 else
2004                         rte_exit(EXIT_FAILURE,
2005                                         "set_rx_queue_stats_mapping_registers "
2006                                         "failed for port id=%d diag=%d\n",
2007                                         pi, diag);
2008         }
2009 }
2010
2011 static void
2012 rxtx_port_config(struct rte_port *port)
2013 {
2014         port->rx_conf = port->dev_info.default_rxconf;
2015         port->tx_conf = port->dev_info.default_txconf;
2016
2017         /* Check if any RX/TX parameters have been passed */
2018         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2019                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2020
2021         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2022                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2023
2024         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2025                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2026
2027         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2028                 port->rx_conf.rx_free_thresh = rx_free_thresh;
2029
2030         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2031                 port->rx_conf.rx_drop_en = rx_drop_en;
2032
2033         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2034                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2035
2036         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2037                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2038
2039         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2040                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2041
2042         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2043                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2044
2045         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2046                 port->tx_conf.tx_free_thresh = tx_free_thresh;
2047
2048         if (txq_flags != RTE_PMD_PARAM_UNSET)
2049                 port->tx_conf.txq_flags = txq_flags;
2050 }
2051
2052 void
2053 init_port_config(void)
2054 {
2055         portid_t pid;
2056         struct rte_port *port;
2057
2058         RTE_ETH_FOREACH_DEV(pid) {
2059                 port = &ports[pid];
2060                 port->dev_conf.rxmode = rx_mode;
2061                 port->dev_conf.fdir_conf = fdir_conf;
2062                 if (nb_rxq > 1) {
2063                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2064                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2065                 } else {
2066                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2067                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2068                 }
2069
2070                 if (port->dcb_flag == 0) {
2071                         if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2072                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2073                         else
2074                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2075                 }
2076
2077                 rxtx_port_config(port);
2078
2079                 rte_eth_macaddr_get(pid, &port->eth_addr);
2080
2081                 map_port_queue_stats_mapping_registers(pid, port);
2082 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2083                 rte_pmd_ixgbe_bypass_init(pid);
2084 #endif
2085
2086                 if (lsc_interrupt &&
2087                     (rte_eth_devices[pid].data->dev_flags &
2088                      RTE_ETH_DEV_INTR_LSC))
2089                         port->dev_conf.intr_conf.lsc = 1;
2090                 if (rmv_interrupt &&
2091                     (rte_eth_devices[pid].data->dev_flags &
2092                      RTE_ETH_DEV_INTR_RMV))
2093                         port->dev_conf.intr_conf.rmv = 1;
2094
2095 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2096                 /* Detect softnic port */
2097                 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2098                         port->softnic_enable = 1;
2099                         memset(&port->softport, 0, sizeof(struct softnic_port));
2100
2101                         if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2102                                 port->softport.tm_flag = 1;
2103                 }
2104 #endif
2105         }
2106 }
2107
2108 void set_port_slave_flag(portid_t slave_pid)
2109 {
2110         struct rte_port *port;
2111
2112         port = &ports[slave_pid];
2113         port->slave_flag = 1;
2114 }
2115
2116 void clear_port_slave_flag(portid_t slave_pid)
2117 {
2118         struct rte_port *port;
2119
2120         port = &ports[slave_pid];
2121         port->slave_flag = 0;
2122 }
2123
2124 uint8_t port_is_bonding_slave(portid_t slave_pid)
2125 {
2126         struct rte_port *port;
2127
2128         port = &ports[slave_pid];
2129         return port->slave_flag;
2130 }
2131
/* VLAN ids used by the DCB configuration below: get_eth_dcb_conf() maps
 * them to VMDQ pools and init_port_dcb_config() installs them in the
 * port's VLAN filter.
 */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
2138
/*
 * Fill @eth_conf with a DCB configuration.
 *
 * With dcb_mode == DCB_VT_ENABLED, a combined VMDQ+DCB setup is built:
 * VLAN tags from the vlan_tags[] table are spread across the queue
 * pools and each user priority is mapped to a traffic class modulo
 * @num_tcs. Otherwise a plain DCB(+RSS) setup with @num_tcs traffic
 * classes is produced. @pfc_en additionally enables priority flow
 * control in the advertised DCB capabilities. Always returns 0.
 */
static  int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		/* 4 TCs leave room for 32 pools, 8 TCs for only 16. */
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		/* One VLAN tag per pool, assigned round-robin. */
		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		/* Map every user priority to a TC, wrapping at num_tcs. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		/* Map every user priority to a TC, wrapping at num_tcs. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
		/* Plain DCB combined with RSS inside each traffic class. */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
2205
/*
 * Reconfigure port @pid for DCB operation.
 *
 * Builds a DCB rte_eth_conf via get_eth_dcb_conf(), applies it to the
 * device (with zero queues, so queues are set up later at start time),
 * derives the global nb_rxq/nb_txq from the device capabilities, enables
 * VLAN filtering for all entries of vlan_tags[], and flags the port as
 * DCB-configured. Returns 0 on success, a negative value on error.
 *
 * NOTE(review): this mutates the global nb_rxq/nb_txq and rx_free_thresh,
 * affecting all ports — testpmd assumes identical DCB capability across
 * ports (see comment below).
 */
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.hw_vlan_filter = 1;

	/**
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be setup.
	 */
	rte_eth_dev_configure(pid, 0, 0, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and has the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			/* presumably with VFs active only the currently
			 * configured PF queues are usable — TODO confirm
			 */
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/*if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			/* one queue per traffic class */
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;

		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
2287
2288 static void
2289 init_port(void)
2290 {
2291         /* Configuration of Ethernet ports. */
2292         ports = rte_zmalloc("testpmd: ports",
2293                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2294                             RTE_CACHE_LINE_SIZE);
2295         if (ports == NULL) {
2296                 rte_exit(EXIT_FAILURE,
2297                                 "rte_zmalloc(%d struct rte_port) failed\n",
2298                                 RTE_MAX_ETHPORTS);
2299         }
2300 }
2301
/*
 * Unwind testpmd state on forced termination: stop and close all ports,
 * then terminate the interactive prompt. Called from signal_handler().
 */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
2308
2309 static void
2310 print_stats(void)
2311 {
2312         uint8_t i;
2313         const char clr[] = { 27, '[', '2', 'J', '\0' };
2314         const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2315
2316         /* Clear screen and move to top left */
2317         printf("%s%s", clr, top_left);
2318
2319         printf("\nPort statistics ====================================");
2320         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2321                 nic_stats_display(fwd_ports_ids[i]);
2322 }
2323
/*
 * SIGINT/SIGTERM handler: tear down packet capture and latency stats,
 * shut down all ports via force_quit(), then re-raise the signal with
 * the default disposition so the process exits with the expected status.
 *
 * NOTE(review): this calls printf/force_quit() directly from signal
 * context, which is not async-signal-safe — presumably acceptable here
 * because the process is terminating anyway; confirm if hardening.
 */
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
2345
2346 int
2347 main(int argc, char** argv)
2348 {
2349         int  diag;
2350         portid_t port_id;
2351
2352         signal(SIGINT, signal_handler);
2353         signal(SIGTERM, signal_handler);
2354
2355         diag = rte_eal_init(argc, argv);
2356         if (diag < 0)
2357                 rte_panic("Cannot init EAL\n");
2358
2359         testpmd_logtype = rte_log_register("testpmd");
2360         if (testpmd_logtype < 0)
2361                 rte_panic("Cannot register log type");
2362         rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2363
2364         if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2365                 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2366                         strerror(errno));
2367         }
2368
2369 #ifdef RTE_LIBRTE_PDUMP
2370         /* initialize packet capture framework */
2371         rte_pdump_init(NULL);
2372 #endif
2373
2374         nb_ports = (portid_t) rte_eth_dev_count();
2375         if (nb_ports == 0)
2376                 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2377
2378         /* allocate port structures, and init them */
2379         init_port();
2380
2381         set_def_fwd_config();
2382         if (nb_lcores == 0)
2383                 rte_panic("Empty set of forwarding logical cores - check the "
2384                           "core mask supplied in the command parameters\n");
2385
2386         /* Bitrate/latency stats disabled by default */
2387 #ifdef RTE_LIBRTE_BITRATE
2388         bitrate_enabled = 0;
2389 #endif
2390 #ifdef RTE_LIBRTE_LATENCY_STATS
2391         latencystats_enabled = 0;
2392 #endif
2393
2394         argc -= diag;
2395         argv += diag;
2396         if (argc > 1)
2397                 launch_args_parse(argc, argv);
2398
2399         if (tx_first && interactive)
2400                 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
2401                                 "interactive mode.\n");
2402
2403         if (tx_first && lsc_interrupt) {
2404                 printf("Warning: lsc_interrupt needs to be off when "
2405                                 " using tx_first. Disabling.\n");
2406                 lsc_interrupt = 0;
2407         }
2408
2409         if (!nb_rxq && !nb_txq)
2410                 printf("Warning: Either rx or tx queues should be non-zero\n");
2411
2412         if (nb_rxq > 1 && nb_rxq > nb_txq)
2413                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2414                        "but nb_txq=%d will prevent to fully test it.\n",
2415                        nb_rxq, nb_txq);
2416
2417         init_config();
2418         if (start_port(RTE_PORT_ALL) != 0)
2419                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2420
2421         /* set all ports to promiscuous mode by default */
2422         RTE_ETH_FOREACH_DEV(port_id)
2423                 rte_eth_promiscuous_enable(port_id);
2424
2425         /* Init metrics library */
2426         rte_metrics_init(rte_socket_id());
2427
2428 #ifdef RTE_LIBRTE_LATENCY_STATS
2429         if (latencystats_enabled != 0) {
2430                 int ret = rte_latencystats_init(1, NULL);
2431                 if (ret)
2432                         printf("Warning: latencystats init()"
2433                                 " returned error %d\n", ret);
2434                 printf("Latencystats running on lcore %d\n",
2435                         latencystats_lcore_id);
2436         }
2437 #endif
2438
2439         /* Setup bitrate stats */
2440 #ifdef RTE_LIBRTE_BITRATE
2441         if (bitrate_enabled != 0) {
2442                 bitrate_data = rte_stats_bitrate_create();
2443                 if (bitrate_data == NULL)
2444                         rte_exit(EXIT_FAILURE,
2445                                 "Could not allocate bitrate data.\n");
2446                 rte_stats_bitrate_reg(bitrate_data);
2447         }
2448 #endif
2449
2450 #ifdef RTE_LIBRTE_CMDLINE
2451         if (strlen(cmdline_filename) != 0)
2452                 cmdline_read_from_file(cmdline_filename);
2453
2454         if (interactive == 1) {
2455                 if (auto_start) {
2456                         printf("Start automatic packet forwarding\n");
2457                         start_packet_forwarding(0);
2458                 }
2459                 prompt();
2460                 pmd_test_exit();
2461         } else
2462 #endif
2463         {
2464                 char c;
2465                 int rc;
2466
2467                 f_quit = 0;
2468
2469                 printf("No commandline core given, start packet forwarding\n");
2470                 start_packet_forwarding(tx_first);
2471                 if (stats_period != 0) {
2472                         uint64_t prev_time = 0, cur_time, diff_time = 0;
2473                         uint64_t timer_period;
2474
2475                         /* Convert to number of cycles */
2476                         timer_period = stats_period * rte_get_timer_hz();
2477
2478                         while (f_quit == 0) {
2479                                 cur_time = rte_get_timer_cycles();
2480                                 diff_time += cur_time - prev_time;
2481
2482                                 if (diff_time >= timer_period) {
2483                                         print_stats();
2484                                         /* Reset the timer */
2485                                         diff_time = 0;
2486                                 }
2487                                 /* Sleep to avoid unnecessary checks */
2488                                 prev_time = cur_time;
2489                                 sleep(1);
2490                         }
2491                 }
2492
2493                 printf("Press enter to exit\n");
2494                 rc = read(0, &c, 1);
2495                 pmd_test_exit();
2496                 if (rc < 0)
2497                         return 1;
2498         }
2499
2500         return 0;
2501 }