1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <errno.h>
15
16 #include <sys/queue.h>
17 #include <sys/stat.h>
18
19 #include <stdint.h>
20 #include <unistd.h>
21 #include <inttypes.h>
22
23 #include <rte_common.h>
24 #include <rte_errno.h>
25 #include <rte_byteorder.h>
26 #include <rte_log.h>
27 #include <rte_debug.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
30 #include <rte_memcpy.h>
31 #include <rte_launch.h>
32 #include <rte_eal.h>
33 #include <rte_alarm.h>
34 #include <rte_per_lcore.h>
35 #include <rte_lcore.h>
36 #include <rte_atomic.h>
37 #include <rte_branch_prediction.h>
38 #include <rte_mempool.h>
39 #include <rte_malloc.h>
40 #include <rte_mbuf.h>
41 #include <rte_interrupts.h>
42 #include <rte_pci.h>
43 #include <rte_ether.h>
44 #include <rte_ethdev.h>
45 #include <rte_dev.h>
46 #include <rte_string_fns.h>
47 #ifdef RTE_LIBRTE_IXGBE_PMD
48 #include <rte_pmd_ixgbe.h>
49 #endif
50 #ifdef RTE_LIBRTE_PDUMP
51 #include <rte_pdump.h>
52 #endif
53 #include <rte_flow.h>
54 #include <rte_metrics.h>
55 #ifdef RTE_LIBRTE_BITRATE
56 #include <rte_bitrate.h>
57 #endif
58 #ifdef RTE_LIBRTE_LATENCY_STATS
59 #include <rte_latencystats.h>
60 #endif
61
62 #include "testpmd.h"
63
64 uint16_t verbose_level = 0; /**< Silent by default. */
65
66 /* Use the master core for the command line? */
67 uint8_t interactive = 0;
68 uint8_t auto_start = 0;
69 uint8_t tx_first;
70 char cmdline_filename[PATH_MAX] = {0};
71
72 /*
73  * NUMA support configuration.
74  * When set, NUMA support dispatches the allocation of the RX and TX memory
75  * rings, and of the DMA memory buffers (mbufs) for the probed ports, among
76  * CPU sockets 0 and 1.
77  * Otherwise, all memory is allocated from CPU socket 0.
78  */
79 uint8_t numa_support = 1; /**< numa enabled by default */
80
81 /*
82  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
83  * not configured.
84  */
85 uint8_t socket_num = UMA_NO_CONFIG;
86
87 /*
88  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
89  */
90 uint8_t mp_anon = 0;
91
92 /*
93  * Record the Ethernet address of peer target ports to which packets are
94  * forwarded.
95  * Must be instantiated with the ethernet addresses of peer traffic generator
96  * ports.
97  */
98 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
99 portid_t nb_peer_eth_addrs = 0;
100
101 /*
102  * Probed Target Environment.
103  */
104 struct rte_port *ports;        /**< For all probed ethernet ports. */
105 portid_t nb_ports;             /**< Number of probed ethernet ports. */
106 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
107 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
108
109 /*
110  * Test Forwarding Configuration.
111  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
112  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
113  */
114 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
115 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
116 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
117 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
118
119 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
120 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
121
122 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
123 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
124
125 /*
126  * Forwarding engines.
127  */
128 struct fwd_engine * fwd_engines[] = {
129         &io_fwd_engine,
130         &mac_fwd_engine,
131         &mac_swap_engine,
132         &flow_gen_engine,
133         &rx_only_engine,
134         &tx_only_engine,
135         &csum_fwd_engine,
136         &icmp_echo_engine,
137 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
138         &softnic_tm_engine,
139         &softnic_tm_bypass_engine,
140 #endif
141 #ifdef RTE_LIBRTE_IEEE1588
142         &ieee1588_fwd_engine,
143 #endif
144         NULL,
145 };
146
147 struct fwd_config cur_fwd_config;
148 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
149 uint32_t retry_enabled;
150 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
151 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
152
153 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
154 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
155                                       * specified on command-line. */
156 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
157
158 /*
159  * In a container, the process running with the 'stats-period' option cannot be
160  * terminated. Set a flag to exit the stats-period loop after SIGINT/SIGTERM is received.
161  */
162 uint8_t f_quit;
163
164 /*
165  * Configuration of packet segments used by the "txonly" processing engine.
166  */
167 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
168 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
169         TXONLY_DEF_PACKET_LEN,
170 };
171 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
172
173 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
174 /**< Split policy for packets to TX. */
175
176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
177 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
178
179 /* Whether the current configuration is in DCB mode; 0 means it is not. */
180 uint8_t dcb_config = 0;
181
182 /* Whether DCB is in testing status. */
183 uint8_t dcb_test = 0;
184
185 /*
186  * Configurable number of RX/TX queues.
187  */
188 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
189 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
190
191 /*
192  * Configurable number of RX/TX ring descriptors.
193  */
194 #define RTE_TEST_RX_DESC_DEFAULT 128
195 #define RTE_TEST_TX_DESC_DEFAULT 512
196 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
197 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
198
199 #define RTE_PMD_PARAM_UNSET -1
200 /*
201  * Configurable values of RX and TX ring threshold registers.
202  */
203
204 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
205 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
206 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
207
208 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
209 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
210 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
211
212 /*
213  * Configurable value of RX free threshold.
214  */
215 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
216
217 /*
218  * Configurable value of RX drop enable.
219  */
220 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
221
222 /*
223  * Configurable value of TX free threshold.
224  */
225 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
226
227 /*
228  * Configurable value of TX RS bit threshold.
229  */
230 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
231
232 /*
233  * Configurable value of TX queue flags.
234  */
235 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
236
237 /*
238  * Receive Side Scaling (RSS) configuration.
239  */
240 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
241
242 /*
243  * Port topology configuration
244  */
245 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
246
247 /*
248  * Avoid flushing all the RX streams before starting forwarding.
249  */
250 uint8_t no_flush_rx = 0; /* flush by default */
251
252 /*
253  * Flow API isolated mode.
254  */
255 uint8_t flow_isolate_all;
256
257 /*
258  * Avoid checking the link status when starting/stopping a port.
259  */
260 uint8_t no_link_check = 0; /* check by default */
261
262 /*
263  * Enable link status change notification
264  */
265 uint8_t lsc_interrupt = 1; /* enabled by default */
266
267 /*
268  * Enable device removal notification.
269  */
270 uint8_t rmv_interrupt = 1; /* enabled by default */
271
272 /*
273  * Display or mask ether events
274  * Default to all events except VF_MBOX
275  */
276 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
277                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
278                             (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
279                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
280                             (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
281                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
282
283 /*
284  * NIC bypass mode configuration options.
285  */
286
287 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
288 /* The NIC bypass watchdog timeout. */
289 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
290 #endif
291
292
293 #ifdef RTE_LIBRTE_LATENCY_STATS
294
295 /*
296  * Set when latency stats is enabled in the commandline
297  */
298 uint8_t latencystats_enabled;
299
300 /*
301  * Lcore ID to serive latency statistics.
302  */
303 lcoreid_t latencystats_lcore_id = -1;
304
305 #endif
306
307 /*
308  * Ethernet device configuration.
309  */
310 struct rte_eth_rxmode rx_mode = {
311         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
312         .split_hdr_size = 0,
313         .header_split   = 0, /**< Header Split disabled. */
314         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
315         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
316         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
317         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
318         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
319         .hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
320         .hw_timestamp   = 0, /**< HW timestamp disabled. */
321 };
322
323 struct rte_fdir_conf fdir_conf = {
324         .mode = RTE_FDIR_MODE_NONE,
325         .pballoc = RTE_FDIR_PBALLOC_64K,
326         .status = RTE_FDIR_REPORT_STATUS,
327         .mask = {
328                 .vlan_tci_mask = 0x0,
329                 .ipv4_mask     = {
330                         .src_ip = 0xFFFFFFFF,
331                         .dst_ip = 0xFFFFFFFF,
332                 },
333                 .ipv6_mask     = {
334                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
335                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
336                 },
337                 .src_port_mask = 0xFFFF,
338                 .dst_port_mask = 0xFFFF,
339                 .mac_addr_byte_mask = 0xFF,
340                 .tunnel_type_mask = 1,
341                 .tunnel_id_mask = 0xFFFFFFFF,
342         },
343         .drop_queue = 127,
344 };
345
346 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
347
348 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
349 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
350
351 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
352 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
353
354 uint16_t nb_tx_queue_stats_mappings = 0;
355 uint16_t nb_rx_queue_stats_mappings = 0;
356
357 /*
358  * Display zero values by default for xstats
359  */
360 uint8_t xstats_hide_zero;
361
362 unsigned int num_sockets = 0;
363 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
364
365 #ifdef RTE_LIBRTE_BITRATE
366 /* Bitrate statistics */
367 struct rte_stats_bitrates *bitrate_data;
368 lcoreid_t bitrate_lcore_id;
369 uint8_t bitrate_enabled;
370 #endif
371
372 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
373 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
374
375 /* Forward function declarations */
376 static void map_port_queue_stats_mapping_registers(portid_t pi,
377                                                    struct rte_port *port);
378 static void check_all_ports_link_status(uint32_t port_mask);
379 static int eth_event_callback(portid_t port_id,
380                               enum rte_eth_event_type type,
381                               void *param, void *ret_param);
382
383 /*
384  * Check if all the ports are started.
385  * If yes, return positive value. If not, return zero.
386  */
387 static int all_ports_started(void);
388
389 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
390 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
391
392 /*
393  * Helper function to check if a socket has not yet been discovered.
394  * If it is new, return a positive value. If it is already known, return zero.
395  */
396 int
397 new_socket_id(unsigned int socket_id)
398 {
399         unsigned int i;
400
401         for (i = 0; i < num_sockets; i++) {
402                 if (socket_ids[i] == socket_id)
403                         return 0;
404         }
405         return 1;
406 }
407
408 /*
409  * Setup default configuration.
410  */
411 static void
412 set_default_fwd_lcores_config(void)
413 {
414         unsigned int i;
415         unsigned int nb_lc;
416         unsigned int sock_num;
417
418         nb_lc = 0;
419         for (i = 0; i < RTE_MAX_LCORE; i++) {
420                 sock_num = rte_lcore_to_socket_id(i);
421                 if (new_socket_id(sock_num)) {
422                         if (num_sockets >= RTE_MAX_NUMA_NODES) {
423                                 rte_exit(EXIT_FAILURE,
424                                          "Total sockets greater than %u\n",
425                                          RTE_MAX_NUMA_NODES);
426                         }
427                         socket_ids[num_sockets++] = sock_num;
428                 }
429                 if (!rte_lcore_is_enabled(i))
430                         continue;
431                 if (i == rte_get_master_lcore())
432                         continue;
433                 fwd_lcores_cpuids[nb_lc++] = i;
434         }
435         nb_lcores = (lcoreid_t) nb_lc;
436         nb_cfg_lcores = nb_lcores;
437         nb_fwd_lcores = 1;
438 }
439
440 static void
441 set_def_peer_eth_addrs(void)
442 {
443         portid_t i;
444
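        /*
         * Default peer addresses are locally administered MAC addresses
         * (first byte ETHER_LOCAL_ADMIN_ADDR) with the peer port index in
         * the last byte.
         */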
445         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
446                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
447                 peer_eth_addrs[i].addr_bytes[5] = i;
448         }
449 }
450
451 static void
452 set_default_fwd_ports_config(void)
453 {
454         portid_t pt_id;
455         int i = 0;
456
457         RTE_ETH_FOREACH_DEV(pt_id)
458                 fwd_ports_ids[i++] = pt_id;
459
460         nb_cfg_ports = nb_ports;
461         nb_fwd_ports = nb_ports;
462 }
463
464 void
465 set_def_fwd_config(void)
466 {
467         set_default_fwd_lcores_config();
468         set_def_peer_eth_addrs();
469         set_default_fwd_ports_config();
470 }
471
472 /*
473  * Creation of an mbuf pool for a given socket, done once at init time.
474  */
475 static void
476 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
477                  unsigned int socket_id)
478 {
479         char pool_name[RTE_MEMPOOL_NAMESIZE];
480         struct rte_mempool *rte_mp = NULL;
481         uint32_t mb_size;
482
483         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
484         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
485
486         RTE_LOG(INFO, USER1,
487                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
488                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
489
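        /*
         * Two creation paths: with --mp-anon, build the pool by hand from an
         * empty mempool populated with anonymous memory, then apply the
         * standard pktmbuf pool and object initializers; otherwise use the
         * rte_pktmbuf_pool_create() helper.
         */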
490         if (mp_anon != 0) {
491                 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
492                         mb_size, (unsigned) mb_mempool_cache,
493                         sizeof(struct rte_pktmbuf_pool_private),
494                         socket_id, 0);
495                 if (rte_mp == NULL)
496                         goto err;
497
498                 if (rte_mempool_populate_anon(rte_mp) == 0) {
499                         rte_mempool_free(rte_mp);
500                         rte_mp = NULL;
501                         goto err;
502                 }
503                 rte_pktmbuf_pool_init(rte_mp, NULL);
504                 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
505         } else {
506                 /* wrapper to rte_mempool_create() */
507                 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
508                         mb_mempool_cache, 0, mbuf_seg_size, socket_id);
509         }
510
511 err:
512         if (rte_mp == NULL) {
513                 rte_exit(EXIT_FAILURE,
514                         "Creation of mbuf pool for socket %u failed: %s\n",
515                         socket_id, rte_strerror(rte_errno));
516         } else if (verbose_level > 0) {
517                 rte_mempool_dump(stdout, rte_mp);
518         }
519 }
520
521 /*
522  * Check whether the given socket id is valid in NUMA mode;
523  * if valid, return 0, else return -1.
524  */
525 static int
526 check_socket_id(const unsigned int socket_id)
527 {
528         static int warning_once = 0;
529
530         if (new_socket_id(socket_id)) {
531                 if (!warning_once && numa_support)
532                         printf("Warning: NUMA should be configured manually by"
533                                " using --port-numa-config and"
534                                " --ring-numa-config parameters along with"
535                                " --numa.\n");
536                 warning_once = 1;
537                 return -1;
538         }
539         return 0;
540 }
541
542 static void
543 init_config(void)
544 {
545         portid_t pid;
546         struct rte_port *port;
547         struct rte_mempool *mbp;
548         unsigned int nb_mbuf_per_pool;
549         lcoreid_t  lc_id;
550         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
551         struct rte_gro_param gro_param;
552         uint32_t gso_types;
553
554         memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
555
556         if (numa_support) {
557                 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
558                 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
559                 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
560         }
561
562         /* Configuration of logical cores. */
563         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
564                                 sizeof(struct fwd_lcore *) * nb_lcores,
565                                 RTE_CACHE_LINE_SIZE);
566         if (fwd_lcores == NULL) {
567                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
568                                                         "failed\n", nb_lcores);
569         }
570         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
571                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
572                                                sizeof(struct fwd_lcore),
573                                                RTE_CACHE_LINE_SIZE);
574                 if (fwd_lcores[lc_id] == NULL) {
575                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
576                                                                 "failed\n");
577                 }
578                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
579         }
580
581         RTE_ETH_FOREACH_DEV(pid) {
582                 port = &ports[pid];
583                 rte_eth_dev_info_get(pid, &port->dev_info);
584
585                 if (numa_support) {
586                         if (port_numa[pid] != NUMA_NO_CONFIG)
587                                 port_per_socket[port_numa[pid]]++;
588                         else {
589                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
590
591                                 /* if socket_id is invalid, set to 0 */
592                                 if (check_socket_id(socket_id) < 0)
593                                         socket_id = 0;
594                                 port_per_socket[socket_id]++;
595                         }
596                 }
597
598                 /* set flag to initialize port/queue */
599                 port->need_reconfig = 1;
600                 port->need_reconfig_queues = 1;
601         }
602
603         /*
604          * Create pools of mbuf.
605          * If NUMA support is disabled, create a single pool of mbuf in
606          * socket 0 memory by default.
607          * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
608          *
609          * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
610          * nb_txd can be configured at run time.
611          */
612         if (param_total_num_mbufs)
613                 nb_mbuf_per_pool = param_total_num_mbufs;
614         else {
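                /*
                 * Worst-case estimate: enough mbufs to fill the largest RX and
                 * TX rings plus the per-lcore mempool caches and one extra
                 * burst, scaled by the maximum number of ports.
                 */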
615                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
616                         (nb_lcores * mb_mempool_cache) +
617                         RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
618                 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
619         }
620
621         if (numa_support) {
622                 uint8_t i;
623
624                 for (i = 0; i < num_sockets; i++)
625                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
626                                          socket_ids[i]);
627         } else {
628                 if (socket_num == UMA_NO_CONFIG)
629                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
630                 else
631                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
632                                                  socket_num);
633         }
634
635         init_port_config();
636
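        /* TX offload types that the software GSO engine may segment. */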
637         gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
638                 DEV_TX_OFFLOAD_GRE_TNL_TSO;
639         /*
640          * Records which Mbuf pool to use by each logical core, if needed.
641          */
642         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
643                 mbp = mbuf_pool_find(
644                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
645
646                 if (mbp == NULL)
647                         mbp = mbuf_pool_find(0);
648                 fwd_lcores[lc_id]->mbp = mbp;
649                 /* initialize GSO context */
650                 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
651                 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
652                 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
653                 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
654                         ETHER_CRC_LEN;
655                 fwd_lcores[lc_id]->gso_ctx.flag = 0;
656         }
657
658         /* Configuration of packet forwarding streams. */
659         if (init_fwd_streams() < 0)
660                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
661
662         fwd_config_setup();
663
664         /* create a gro context for each lcore */
665         gro_param.gro_types = RTE_GRO_TCP_IPV4;
666         gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
667         gro_param.max_item_per_flow = MAX_PKT_BURST;
668         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
669                 gro_param.socket_id = rte_lcore_to_socket_id(
670                                 fwd_lcores_cpuids[lc_id]);
671                 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
672                 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
673                         rte_exit(EXIT_FAILURE,
674                                         "rte_gro_ctx_create() failed\n");
675                 }
676         }
677 }
678
679
680 void
681 reconfig(portid_t new_port_id, unsigned socket_id)
682 {
683         struct rte_port *port;
684
685         /* Reconfiguration of Ethernet ports. */
686         port = &ports[new_port_id];
687         rte_eth_dev_info_get(new_port_id, &port->dev_info);
688
689         /* set flag to initialize port/queue */
690         port->need_reconfig = 1;
691         port->need_reconfig_queues = 1;
692         port->socket_id = socket_id;
693
694         init_port_config();
695 }
696
697
698 int
699 init_fwd_streams(void)
700 {
701         portid_t pid;
702         struct rte_port *port;
703         streamid_t sm_id, nb_fwd_streams_new;
704         queueid_t q;
705
706         /* Set the socket id according to whether NUMA is enabled. */
707         RTE_ETH_FOREACH_DEV(pid) {
708                 port = &ports[pid];
709                 if (nb_rxq > port->dev_info.max_rx_queues) {
710                         printf("Fail: nb_rxq(%d) is greater than "
711                                 "max_rx_queues(%d)\n", nb_rxq,
712                                 port->dev_info.max_rx_queues);
713                         return -1;
714                 }
715                 if (nb_txq > port->dev_info.max_tx_queues) {
716                         printf("Fail: nb_txq(%d) is greater than "
717                                 "max_tx_queues(%d)\n", nb_txq,
718                                 port->dev_info.max_tx_queues);
719                         return -1;
720                 }
721                 if (numa_support) {
722                         if (port_numa[pid] != NUMA_NO_CONFIG)
723                                 port->socket_id = port_numa[pid];
724                         else {
725                                 port->socket_id = rte_eth_dev_socket_id(pid);
726
727                                 /* if socket_id is invalid, set to 0 */
728                                 if (check_socket_id(port->socket_id) < 0)
729                                         port->socket_id = 0;
730                         }
731                 }
732                 else {
733                         if (socket_num == UMA_NO_CONFIG)
734                                 port->socket_id = 0;
735                         else
736                                 port->socket_id = socket_num;
737                 }
738         }
739
740         q = RTE_MAX(nb_rxq, nb_txq);
741         if (q == 0) {
742                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
743                 return -1;
744         }
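        /* One forwarding stream per port and per queue (max of RX/TX queues). */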
745         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
746         if (nb_fwd_streams_new == nb_fwd_streams)
747                 return 0;
748         /* clear the old */
749         if (fwd_streams != NULL) {
750                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
751                         if (fwd_streams[sm_id] == NULL)
752                                 continue;
753                         rte_free(fwd_streams[sm_id]);
754                         fwd_streams[sm_id] = NULL;
755                 }
756                 rte_free(fwd_streams);
757                 fwd_streams = NULL;
758         }
759
760         /* init new */
761         nb_fwd_streams = nb_fwd_streams_new;
762         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
763                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
764         if (fwd_streams == NULL)
765                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
766                                                 "failed\n", nb_fwd_streams);
767
768         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
769                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
770                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
771                 if (fwd_streams[sm_id] == NULL)
772                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
773                                                                 " failed\n");
774         }
775
776         return 0;
777 }
778
779 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
780 static void
781 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
782 {
783         unsigned int total_burst;
784         unsigned int nb_burst;
785         unsigned int burst_stats[3];
786         uint16_t pktnb_stats[3];
787         uint16_t nb_pkt;
788         int burst_percent[3];
789
790         /*
791          * First compute the total number of packet bursts and the
792          * two highest numbers of bursts of the same number of packets.
793          */
794         total_burst = 0;
795         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
796         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
797         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
798                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
799                 if (nb_burst == 0)
800                         continue;
801                 total_burst += nb_burst;
802                 if (nb_burst > burst_stats[0]) {
803                         burst_stats[1] = burst_stats[0];
804                         pktnb_stats[1] = pktnb_stats[0];
805                         burst_stats[0] = nb_burst;
806                         pktnb_stats[0] = nb_pkt;
807                 }
808         }
809         if (total_burst == 0)
810                 return;
811         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
812         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
813                burst_percent[0], (int) pktnb_stats[0]);
814         if (burst_stats[0] == total_burst) {
815                 printf("]\n");
816                 return;
817         }
818         if (burst_stats[0] + burst_stats[1] == total_burst) {
819                 printf(" + %d%% of %d pkts]\n",
820                        100 - burst_percent[0], pktnb_stats[1]);
821                 return;
822         }
823         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
824         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
825         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
826                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
827                 return;
828         }
829         printf(" + %d%% of %d pkts + %d%% of others]\n",
830                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
831 }
832 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
833
834 static void
835 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
836 {
837         struct rte_port *port;
838         uint8_t i;
839
840         static const char *fwd_stats_border = "----------------------";
841
842         port = &ports[port_id];
843         printf("\n  %s Forward statistics for port %-2d %s\n",
844                fwd_stats_border, port_id, fwd_stats_border);
845
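        /*
         * Two output layouts: a compact one when no queue-stats mapping is
         * enabled on the port, and a wider, aligned one otherwise.
         */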
846         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
847                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
848                        "%-"PRIu64"\n",
849                        stats->ipackets, stats->imissed,
850                        (uint64_t) (stats->ipackets + stats->imissed));
851
852                 if (cur_fwd_eng == &csum_fwd_engine)
853                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
854                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
855                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
856                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
857                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
858                 }
859
860                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
861                        "%-"PRIu64"\n",
862                        stats->opackets, port->tx_dropped,
863                        (uint64_t) (stats->opackets + port->tx_dropped));
864         }
865         else {
866                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
867                        "%14"PRIu64"\n",
868                        stats->ipackets, stats->imissed,
869                        (uint64_t) (stats->ipackets + stats->imissed));
870
871                 if (cur_fwd_eng == &csum_fwd_engine)
872                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
873                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
874                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
875                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
876                         printf("  RX-nombufs:             %14"PRIu64"\n",
877                                stats->rx_nombuf);
878                 }
879
880                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
881                        "%14"PRIu64"\n",
882                        stats->opackets, port->tx_dropped,
883                        (uint64_t) (stats->opackets + port->tx_dropped));
884         }
885
886 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
887         if (port->rx_stream)
888                 pkt_burst_stats_display("RX",
889                         &port->rx_stream->rx_burst_stats);
890         if (port->tx_stream)
891                 pkt_burst_stats_display("TX",
892                         &port->tx_stream->tx_burst_stats);
893 #endif
894
895         if (port->rx_queue_stats_mapping_enabled) {
896                 printf("\n");
897                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
898                         printf("  Stats reg %2d RX-packets:%14"PRIu64
899                                "     RX-errors:%14"PRIu64
900                                "    RX-bytes:%14"PRIu64"\n",
901                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
902                 }
903                 printf("\n");
904         }
905         if (port->tx_queue_stats_mapping_enabled) {
906                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
907                         printf("  Stats reg %2d TX-packets:%14"PRIu64
908                                "                                 TX-bytes:%14"PRIu64"\n",
909                                i, stats->q_opackets[i], stats->q_obytes[i]);
910                 }
911         }
912
913         printf("  %s--------------------------------%s\n",
914                fwd_stats_border, fwd_stats_border);
915 }
916
917 static void
918 fwd_stream_stats_display(streamid_t stream_id)
919 {
920         struct fwd_stream *fs;
921         static const char *fwd_top_stats_border = "-------";
922
923         fs = fwd_streams[stream_id];
924         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
925             (fs->fwd_dropped == 0))
926                 return;
927         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
928                "TX Port=%2d/Queue=%2d %s\n",
929                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
930                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
931         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
932                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
933
934         /* if checksum mode */
935         if (cur_fwd_eng == &csum_fwd_engine) {
936                printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
937                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
938         }
939
940 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
941         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
942         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
943 #endif
944 }
945
946 static void
947 flush_fwd_rx_queues(void)
948 {
949         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
950         portid_t  rxp;
951         portid_t port_id;
952         queueid_t rxq;
953         uint16_t  nb_rx;
954         uint16_t  i;
955         uint8_t   j;
956         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
957         uint64_t timer_period;
958
959         /* convert to number of cycles */
960         timer_period = rte_get_timer_hz(); /* 1 second timeout */
961
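        /*
         * Two flush passes with a short delay in between, presumably to also
         * catch packets that were still in flight during the first pass.
         */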
962         for (j = 0; j < 2; j++) {
963                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
964                         for (rxq = 0; rxq < nb_rxq; rxq++) {
965                                 port_id = fwd_ports_ids[rxp];
966                                 /*
967                                  * testpmd can get stuck in the do-while loop
968                                  * below if rte_eth_rx_burst() keeps returning
969                                  * packets, so a timer is used to exit the
970                                  * loop after a 1-second timeout.
971                                  */
972                                 prev_tsc = rte_rdtsc();
973                                 do {
974                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
975                                                 pkts_burst, MAX_PKT_BURST);
976                                         for (i = 0; i < nb_rx; i++)
977                                                 rte_pktmbuf_free(pkts_burst[i]);
978
979                                         cur_tsc = rte_rdtsc();
980                                         diff_tsc = cur_tsc - prev_tsc;
981                                         timer_tsc += diff_tsc;
982                                 } while ((nb_rx > 0) &&
983                                         (timer_tsc < timer_period));
984                                 timer_tsc = 0;
985                         }
986                 }
987                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
988         }
989 }
990
991 static void
992 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
993 {
994         struct fwd_stream **fsm;
995         streamid_t nb_fs;
996         streamid_t sm_id;
997 #ifdef RTE_LIBRTE_BITRATE
998         uint64_t tics_per_1sec;
999         uint64_t tics_datum;
1000         uint64_t tics_current;
1001         uint8_t idx_port, cnt_ports;
1002
1003         cnt_ports = rte_eth_dev_count();
1004         tics_datum = rte_rdtsc();
1005         tics_per_1sec = rte_get_timer_hz();
1006 #endif
1007         fsm = &fwd_streams[fc->stream_idx];
1008         nb_fs = fc->stream_nb;
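        /*
         * Main forwarding loop: run the engine's packet_fwd callback on every
         * stream assigned to this lcore until the lcore is told to stop.
         */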
1009         do {
1010                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1011                         (*pkt_fwd)(fsm[sm_id]);
1012 #ifdef RTE_LIBRTE_BITRATE
1013                 if (bitrate_enabled != 0 &&
1014                                 bitrate_lcore_id == rte_lcore_id()) {
1015                         tics_current = rte_rdtsc();
1016                         if (tics_current - tics_datum >= tics_per_1sec) {
1017                                 /* Periodic bitrate calculation */
1018                                 for (idx_port = 0;
1019                                                 idx_port < cnt_ports;
1020                                                 idx_port++)
1021                                         rte_stats_bitrate_calc(bitrate_data,
1022                                                 idx_port);
1023                                 tics_datum = tics_current;
1024                         }
1025                 }
1026 #endif
1027 #ifdef RTE_LIBRTE_LATENCY_STATS
1028                 if (latencystats_enabled != 0 &&
1029                                 latencystats_lcore_id == rte_lcore_id())
1030                         rte_latencystats_update();
1031 #endif
1032
1033         } while (! fc->stopped);
1034 }
1035
1036 static int
1037 start_pkt_forward_on_core(void *fwd_arg)
1038 {
1039         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1040                              cur_fwd_config.fwd_eng->packet_fwd);
1041         return 0;
1042 }
1043
1044 /*
1045  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1046  * Used to start communication flows in network loopback test configurations.
1047  */
1048 static int
1049 run_one_txonly_burst_on_core(void *fwd_arg)
1050 {
1051         struct fwd_lcore *fwd_lc;
1052         struct fwd_lcore tmp_lcore;
1053
1054         fwd_lc = (struct fwd_lcore *) fwd_arg;
1055         tmp_lcore = *fwd_lc;
1056         tmp_lcore.stopped = 1;
1057         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1058         return 0;
1059 }
1060
1061 /*
1062  * Launch packet forwarding:
1063  *     - Setup per-port forwarding context.
1064  *     - launch logical cores with their forwarding configuration.
1065  */
1066 static void
1067 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1068 {
1069         port_fwd_begin_t port_fwd_begin;
1070         unsigned int i;
1071         unsigned int lc_id;
1072         int diag;
1073
1074         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1075         if (port_fwd_begin != NULL) {
1076                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1077                         (*port_fwd_begin)(fwd_ports_ids[i]);
1078         }
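        /*
         * Launch the forwarding function on each forwarding lcore; in
         * interactive mode the master lcore is skipped so it can keep
         * serving the command line.
         */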
1079         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1080                 lc_id = fwd_lcores_cpuids[i];
1081                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1082                         fwd_lcores[i]->stopped = 0;
1083                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1084                                                      fwd_lcores[i], lc_id);
1085                         if (diag != 0)
1086                                 printf("launch lcore %u failed - diag=%d\n",
1087                                        lc_id, diag);
1088                 }
1089         }
1090 }
1091
1092 /*
1093  * Launch packet forwarding configuration.
1094  */
1095 void
1096 start_packet_forwarding(int with_tx_first)
1097 {
1098         port_fwd_begin_t port_fwd_begin;
1099         port_fwd_end_t  port_fwd_end;
1100         struct rte_port *port;
1101         unsigned int i;
1102         portid_t   pt_id;
1103         streamid_t sm_id;
1104
1105         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1106                 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1107
1108         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1109                 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1110
1111         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1112                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1113                 (!nb_rxq || !nb_txq))
1114                 rte_exit(EXIT_FAILURE,
1115                         "Either rxq or txq are 0, cannot use %s fwd mode\n",
1116                         cur_fwd_eng->fwd_mode_name);
1117
1118         if (all_ports_started() == 0) {
1119                 printf("Not all ports were started\n");
1120                 return;
1121         }
1122         if (test_done == 0) {
1123                 printf("Packet forwarding already started\n");
1124                 return;
1125         }
1126
1127         if (init_fwd_streams() < 0) {
1128                 printf("Fail from init_fwd_streams()\n");
1129                 return;
1130         }
1131
1132         if(dcb_test) {
1133                 for (i = 0; i < nb_fwd_ports; i++) {
1134                         pt_id = fwd_ports_ids[i];
1135                         port = &ports[pt_id];
1136                         if (!port->dcb_flag) {
1137                                 printf("In DCB mode, all forwarding ports must "
1138                                        "be configured in this mode.\n");
1139                                 return;
1140                         }
1141                 }
1142                 if (nb_fwd_lcores == 1) {
1143                         printf("In DCB mode, the number of forwarding cores "
1144                                "should be larger than 1.\n");
1145                         return;
1146                 }
1147         }
1148         test_done = 0;
1149
1150         if(!no_flush_rx)
1151                 flush_fwd_rx_queues();
1152
1153         fwd_config_setup();
1154         pkt_fwd_config_display(&cur_fwd_config);
1155         rxtx_config_display();
1156
1157         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1158                 pt_id = fwd_ports_ids[i];
1159                 port = &ports[pt_id];
1160                 rte_eth_stats_get(pt_id, &port->stats);
1161                 port->tx_dropped = 0;
1162
1163                 map_port_queue_stats_mapping_registers(pt_id, port);
1164         }
1165         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1166                 fwd_streams[sm_id]->rx_packets = 0;
1167                 fwd_streams[sm_id]->tx_packets = 0;
1168                 fwd_streams[sm_id]->fwd_dropped = 0;
1169                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1170                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1171
1172 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1173                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1174                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1175                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1176                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1177 #endif
1178 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1179                 fwd_streams[sm_id]->core_cycles = 0;
1180 #endif
1181         }
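        /*
         * With --tx-first, send the initial TX-only burst(s) and wait for
         * them to complete before launching the configured forwarding engine.
         */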
1182         if (with_tx_first) {
1183                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1184                 if (port_fwd_begin != NULL) {
1185                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1186                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1187                 }
1188                 while (with_tx_first--) {
1189                         launch_packet_forwarding(
1190                                         run_one_txonly_burst_on_core);
1191                         rte_eal_mp_wait_lcore();
1192                 }
1193                 port_fwd_end = tx_only_engine.port_fwd_end;
1194                 if (port_fwd_end != NULL) {
1195                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1196                                 (*port_fwd_end)(fwd_ports_ids[i]);
1197                 }
1198         }
1199         launch_packet_forwarding(start_pkt_forward_on_core);
1200 }
1201
1202 void
1203 stop_packet_forwarding(void)
1204 {
1205         struct rte_eth_stats stats;
1206         struct rte_port *port;
1207         port_fwd_end_t  port_fwd_end;
1208         int i;
1209         portid_t   pt_id;
1210         streamid_t sm_id;
1211         lcoreid_t  lc_id;
1212         uint64_t total_recv;
1213         uint64_t total_xmit;
1214         uint64_t total_rx_dropped;
1215         uint64_t total_tx_dropped;
1216         uint64_t total_rx_nombuf;
1217         uint64_t tx_dropped;
1218         uint64_t rx_bad_ip_csum;
1219         uint64_t rx_bad_l4_csum;
1220 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1221         uint64_t fwd_cycles;
1222 #endif
1223
1224         static const char *acc_stats_border = "+++++++++++++++";
1225
1226         if (test_done) {
1227                 printf("Packet forwarding not started\n");
1228                 return;
1229         }
1230         printf("Telling cores to stop...");
1231         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1232                 fwd_lcores[lc_id]->stopped = 1;
1233         printf("\nWaiting for lcores to finish...\n");
1234         rte_eal_mp_wait_lcore();
1235         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1236         if (port_fwd_end != NULL) {
1237                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1238                         pt_id = fwd_ports_ids[i];
1239                         (*port_fwd_end)(pt_id);
1240                 }
1241         }
1242 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1243         fwd_cycles = 0;
1244 #endif
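        /*
         * Accumulate the per-stream counters into the per-port totals, and
         * display per-stream statistics when there is more than one stream
         * per port.
         */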
1245         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1246                 if (cur_fwd_config.nb_fwd_streams >
1247                     cur_fwd_config.nb_fwd_ports) {
1248                         fwd_stream_stats_display(sm_id);
1249                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1250                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1251                 } else {
1252                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1253                                 fwd_streams[sm_id];
1254                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1255                                 fwd_streams[sm_id];
1256                 }
1257                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1258                 tx_dropped = (uint64_t) (tx_dropped +
1259                                          fwd_streams[sm_id]->fwd_dropped);
1260                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1261
1262                 rx_bad_ip_csum =
1263                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1264                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1265                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1266                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1267                                                         rx_bad_ip_csum;
1268
1269                 rx_bad_l4_csum =
1270                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1271                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1272                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1273                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1274                                                         rx_bad_l4_csum;
1275
1276 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1277                 fwd_cycles = (uint64_t) (fwd_cycles +
1278                                          fwd_streams[sm_id]->core_cycles);
1279 #endif
1280         }
1281         total_recv = 0;
1282         total_xmit = 0;
1283         total_rx_dropped = 0;
1284         total_tx_dropped = 0;
1285         total_rx_nombuf  = 0;
1286         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1287                 pt_id = fwd_ports_ids[i];
1288
1289                 port = &ports[pt_id];
1290                 rte_eth_stats_get(pt_id, &stats);
1291                 stats.ipackets -= port->stats.ipackets;
1292                 port->stats.ipackets = 0;
1293                 stats.opackets -= port->stats.opackets;
1294                 port->stats.opackets = 0;
1295                 stats.ibytes   -= port->stats.ibytes;
1296                 port->stats.ibytes = 0;
1297                 stats.obytes   -= port->stats.obytes;
1298                 port->stats.obytes = 0;
1299                 stats.imissed  -= port->stats.imissed;
1300                 port->stats.imissed = 0;
1301                 stats.oerrors  -= port->stats.oerrors;
1302                 port->stats.oerrors = 0;
1303                 stats.rx_nombuf -= port->stats.rx_nombuf;
1304                 port->stats.rx_nombuf = 0;
1305
1306                 total_recv += stats.ipackets;
1307                 total_xmit += stats.opackets;
1308                 total_rx_dropped += stats.imissed;
1309                 total_tx_dropped += port->tx_dropped;
1310                 total_rx_nombuf  += stats.rx_nombuf;
1311
1312                 fwd_port_stats_display(pt_id, &stats);
1313         }
1314
1315         printf("\n  %s Accumulated forward statistics for all ports"
1316                "%s\n",
1317                acc_stats_border, acc_stats_border);
1318         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1319                "%-"PRIu64"\n"
1320                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1321                "%-"PRIu64"\n",
1322                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1323                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1324         if (total_rx_nombuf > 0)
1325                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1326         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1327                "%s\n",
1328                acc_stats_border, acc_stats_border);
1329 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1330         if (total_recv > 0)
1331                 printf("\n  CPU cycles/packet=%u (total cycles="
1332                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1333                        (unsigned int)(fwd_cycles / total_recv),
1334                        fwd_cycles, total_recv);
1335 #endif
1336         printf("\nDone.\n");
1337         test_done = 1;
1338 }
1339
1340 void
1341 dev_set_link_up(portid_t pid)
1342 {
1343         if (rte_eth_dev_set_link_up(pid) < 0)
1344                 printf("\nSet link up fail.\n");
1345 }
1346
1347 void
1348 dev_set_link_down(portid_t pid)
1349 {
1350         if (rte_eth_dev_set_link_down(pid) < 0)
1351                 printf("\nSet link down fail.\n");
1352 }
1353
1354 static int
1355 all_ports_started(void)
1356 {
1357         portid_t pi;
1358         struct rte_port *port;
1359
1360         RTE_ETH_FOREACH_DEV(pi) {
1361                 port = &ports[pi];
1362                 /* Check if there is a port which is not started */
1363                 if ((port->port_status != RTE_PORT_STARTED) &&
1364                         (port->slave_flag == 0))
1365                         return 0;
1366         }
1367
1368         /* All ports are started. */
1369         return 1;
1370 }
1371
1372 int
1373 all_ports_stopped(void)
1374 {
1375         portid_t pi;
1376         struct rte_port *port;
1377
1378         RTE_ETH_FOREACH_DEV(pi) {
1379                 port = &ports[pi];
1380                 if ((port->port_status != RTE_PORT_STOPPED) &&
1381                         (port->slave_flag == 0))
1382                         return 0;
1383         }
1384
1385         return 1;
1386 }
1387
1388 int
1389 port_is_started(portid_t port_id)
1390 {
1391         if (port_id_is_invalid(port_id, ENABLED_WARN))
1392                 return 0;
1393
1394         if (ports[port_id].port_status != RTE_PORT_STARTED)
1395                 return 0;
1396
1397         return 1;
1398 }
1399
1400 static int
1401 port_is_closed(portid_t port_id)
1402 {
1403         if (port_id_is_invalid(port_id, ENABLED_WARN))
1404                 return 0;
1405
1406         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1407                 return 0;
1408
1409         return 1;
1410 }
1411
1412 int
1413 start_port(portid_t pid)
1414 {
1415         int diag, need_check_link_status = -1;
1416         portid_t pi;
1417         queueid_t qi;
1418         struct rte_port *port;
1419         struct ether_addr mac_addr;
1420         enum rte_eth_event_type event_type;
1421
1422         if (port_id_is_invalid(pid, ENABLED_WARN))
1423                 return 0;
1424
1425         if(dcb_config)
1426                 dcb_test = 1;
1427         RTE_ETH_FOREACH_DEV(pi) {
1428                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1429                         continue;
1430
1431                 need_check_link_status = 0;
1432                 port = &ports[pi];
1433                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1434                                                  RTE_PORT_HANDLING) == 0) {
1435                         printf("Port %d is now not stopped\n", pi);
1436                         continue;
1437                 }
1438
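                /*
                 * A full rte_eth_dev_configure() is done only when the
                 * need_reconfig flag is set (first start or after a
                 * configuration change).
                 */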
1439                 if (port->need_reconfig > 0) {
1440                         port->need_reconfig = 0;
1441
1442                         if (flow_isolate_all) {
1443                                 int ret = port_flow_isolate(pi, 1);
1444                                 if (ret) {
1445                                         printf("Failed to apply isolated"
1446                                                " mode on port %d\n", pi);
1447                                         return -1;
1448                                 }
1449                         }
1450
1451                         printf("Configuring Port %d (socket %u)\n", pi,
1452                                         port->socket_id);
1453                         /* configure port */
1454                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1455                                                 &(port->dev_conf));
1456                         if (diag != 0) {
1457                                 if (rte_atomic16_cmpset(&(port->port_status),
1458                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1459                                         printf("Port %d cannot be set back "
1460                                                         "to stopped\n", pi);
1461                                 printf("Failed to configure port %d\n", pi);
1462                                 /* try to reconfigure port next time */
1463                                 port->need_reconfig = 1;
1464                                 return -1;
1465                         }
1466                 }
1467                 if (port->need_reconfig_queues > 0) {
1468                         port->need_reconfig_queues = 0;
1469                         /* setup tx queues */
1470                         for (qi = 0; qi < nb_txq; qi++) {
1471                                 if ((numa_support) &&
1472                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1473                                         diag = rte_eth_tx_queue_setup(pi, qi,
1474                                                 nb_txd, txring_numa[pi],
1475                                                 &(port->tx_conf));
1476                                 else
1477                                         diag = rte_eth_tx_queue_setup(pi, qi,
1478                                                 nb_txd, port->socket_id,
1479                                                 &(port->tx_conf));
1480
1481                                 if (diag == 0)
1482                                         continue;
1483
1484                                 /* Failed to setup tx queue, return */
1485                                 if (rte_atomic16_cmpset(&(port->port_status),
1486                                                         RTE_PORT_HANDLING,
1487                                                         RTE_PORT_STOPPED) == 0)
1488                                         printf("Port %d cannot be set back "
1489                                                         "to stopped\n", pi);
1490                                 printf("Failed to configure port %d tx queues\n", pi);
1491                                 /* try to reconfigure queues next time */
1492                                 port->need_reconfig_queues = 1;
1493                                 return -1;
1494                         }
1495                         /* setup rx queues */
1496                         for (qi = 0; qi < nb_rxq; qi++) {
1497                                 if ((numa_support) &&
1498                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1499                                         struct rte_mempool * mp =
1500                                                 mbuf_pool_find(rxring_numa[pi]);
1501                                         if (mp == NULL) {
1502                                                 printf("Failed to setup RX queue: "
1503                                                         "no mempool allocation"
1504                                                         " on socket %d\n",
1505                                                         rxring_numa[pi]);
1506                                                 return -1;
1507                                         }
1508
1509                                         diag = rte_eth_rx_queue_setup(pi, qi,
1510                                              nb_rxd, rxring_numa[pi],
1511                                              &(port->rx_conf), mp);
1512                                 } else {
1513                                         struct rte_mempool *mp =
1514                                                 mbuf_pool_find(port->socket_id);
1515                                         if (mp == NULL) {
1516                                                 printf("Failed to setup RX queue: "
1517                                                         "no mempool allocation"
1518                                                         " on socket %d\n",
1519                                                         port->socket_id);
1520                                                 return -1;
1521                                         }
1522                                         diag = rte_eth_rx_queue_setup(pi, qi,
1523                                              nb_rxd, port->socket_id,
1524                                              &(port->rx_conf), mp);
1525                                 }
1526                                 if (diag == 0)
1527                                         continue;
1528
1529                                 /* Failed to setup rx queue, return */
1530                                 if (rte_atomic16_cmpset(&(port->port_status),
1531                                                         RTE_PORT_HANDLING,
1532                                                         RTE_PORT_STOPPED) == 0)
1533                                         printf("Port %d cannot be set back "
1534                                                         "to stopped\n", pi);
1535                                 printf("Failed to configure port %d rx queues\n", pi);
1536                                 /* try to reconfigure queues next time */
1537                                 port->need_reconfig_queues = 1;
1538                                 return -1;
1539                         }
1540                 }
1541
1542                 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1543                      event_type < RTE_ETH_EVENT_MAX;
1544                      event_type++) {
1545                         diag = rte_eth_dev_callback_register(pi,
1546                                                         event_type,
1547                                                         eth_event_callback,
1548                                                         NULL);
1549                         if (diag) {
1550                                 printf("Failed to setup event callback for event %d\n",
1551                                         event_type);
1552                                 return -1;
1553                         }
1554                 }
1555
1556                 /* start port */
1557                 if (rte_eth_dev_start(pi) < 0) {
1558                         printf("Failed to start port %d\n", pi);
1559
1560                         /* Failed to start port, roll the status back */
1561                         if (rte_atomic16_cmpset(&(port->port_status),
1562                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1563                                 printf("Port %d cannot be set back to "
1564                                                         "stopped\n", pi);
1565                         continue;
1566                 }
1567
1568                 if (rte_atomic16_cmpset(&(port->port_status),
1569                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1570                         printf("Port %d cannot be set to started\n", pi);
1571
1572                 rte_eth_macaddr_get(pi, &mac_addr);
1573                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1574                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1575                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1576                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1577
1578                 /* at least one port started, need checking link status */
1579                 need_check_link_status = 1;
1580         }
1581
1582         if (need_check_link_status == 1 && !no_link_check)
1583                 check_all_ports_link_status(RTE_PORT_ALL);
1584         else if (need_check_link_status == 0)
1585                 printf("Please stop the ports first\n");
1586
1587         printf("Done\n");
1588         return 0;
1589 }
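
/*
 * The per-port bring-up above follows the usual ethdev sequence: configure
 * the device, set up every TX and RX queue, then start the port.  A minimal
 * stand-alone sketch of that sequence (one RX and one TX queue, PMD default
 * queue configuration; "mp" is assumed to be an mbuf pool created earlier
 * with rte_pktmbuf_pool_create()) could look like:
 *
 *	struct rte_eth_conf conf;
 *	memset(&conf, 0, sizeof(conf));
 *	if (rte_eth_dev_configure(pid, 1, 1, &conf) < 0 ||
 *	    rte_eth_tx_queue_setup(pid, 0, nb_txd, rte_socket_id(), NULL) < 0 ||
 *	    rte_eth_rx_queue_setup(pid, 0, nb_rxd, rte_socket_id(), NULL, mp) < 0 ||
 *	    rte_eth_dev_start(pid) < 0)
 *		rte_exit(EXIT_FAILURE, "port %u bring-up failed\n", pid);
 *
 * start_port() additionally registers event callbacks and guards every state
 * transition with rte_atomic16_cmpset() on port_status, so that concurrent
 * commands cannot configure or start the same port twice.
 */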
1590
1591 void
1592 stop_port(portid_t pid)
1593 {
1594         portid_t pi;
1595         struct rte_port *port;
1596         int need_check_link_status = 0;
1597
1598         if (dcb_test) {
1599                 dcb_test = 0;
1600                 dcb_config = 0;
1601         }
1602
1603         if (port_id_is_invalid(pid, ENABLED_WARN))
1604                 return;
1605
1606         printf("Stopping ports...\n");
1607
1608         RTE_ETH_FOREACH_DEV(pi) {
1609                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1610                         continue;
1611
1612                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1613                         printf("Please remove port %d from forwarding configuration.\n", pi);
1614                         continue;
1615                 }
1616
1617                 if (port_is_bonding_slave(pi)) {
1618                         printf("Please remove port %d from bonded device.\n", pi);
1619                         continue;
1620                 }
1621
1622                 port = &ports[pi];
1623                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1624                                                 RTE_PORT_HANDLING) == 0)
1625                         continue;
1626
1627                 rte_eth_dev_stop(pi);
1628
1629                 if (rte_atomic16_cmpset(&(port->port_status),
1630                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1631                         printf("Port %d cannot be set to stopped\n", pi);
1632                 need_check_link_status = 1;
1633         }
1634         if (need_check_link_status && !no_link_check)
1635                 check_all_ports_link_status(RTE_PORT_ALL);
1636
1637         printf("Done\n");
1638 }
1639
1640 void
1641 close_port(portid_t pid)
1642 {
1643         portid_t pi;
1644         struct rte_port *port;
1645
1646         if (port_id_is_invalid(pid, ENABLED_WARN))
1647                 return;
1648
1649         printf("Closing ports...\n");
1650
1651         RTE_ETH_FOREACH_DEV(pi) {
1652                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1653                         continue;
1654
1655                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1656                         printf("Please remove port %d from forwarding configuration.\n", pi);
1657                         continue;
1658                 }
1659
1660                 if (port_is_bonding_slave(pi)) {
1661                         printf("Please remove port %d from bonded device.\n", pi);
1662                         continue;
1663                 }
1664
1665                 port = &ports[pi];
1666                 if (rte_atomic16_cmpset(&(port->port_status),
1667                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1668                         printf("Port %d is already closed\n", pi);
1669                         continue;
1670                 }
1671
1672                 if (rte_atomic16_cmpset(&(port->port_status),
1673                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1674                         printf("Port %d is not stopped, skipping\n", pi);
1675                         continue;
1676                 }
1677
1678                 if (port->flow_list)
1679                         port_flow_flush(pi);
1680                 rte_eth_dev_close(pi);
1681
1682                 if (rte_atomic16_cmpset(&(port->port_status),
1683                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1684                         printf("Port %d cannot be set to closed\n", pi);
1685         }
1686
1687         printf("Done\n");
1688 }
1689
1690 void
1691 reset_port(portid_t pid)
1692 {
1693         int diag;
1694         portid_t pi;
1695         struct rte_port *port;
1696
1697         if (port_id_is_invalid(pid, ENABLED_WARN))
1698                 return;
1699
1700         printf("Resetting ports...\n");
1701
1702         RTE_ETH_FOREACH_DEV(pi) {
1703                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1704                         continue;
1705
1706                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1707                         printf("Please remove port %d from forwarding "
1708                                "configuration.\n", pi);
1709                         continue;
1710                 }
1711
1712                 if (port_is_bonding_slave(pi)) {
1713                         printf("Please remove port %d from bonded device.\n",
1714                                pi);
1715                         continue;
1716                 }
1717
1718                 diag = rte_eth_dev_reset(pi);
1719                 if (diag == 0) {
1720                         port = &ports[pi];
1721                         port->need_reconfig = 1;
1722                         port->need_reconfig_queues = 1;
1723                 } else {
1724                         printf("Failed to reset port %d. diag=%d\n", pi, diag);
1725                 }
1726         }
1727
1728         printf("Done\n");
1729 }
1730
1731 void
1732 attach_port(char *identifier)
1733 {
1734         portid_t pi = 0;
1735         unsigned int socket_id;
1736
1737         printf("Attaching a new port...\n");
1738
1739         if (identifier == NULL) {
1740                 printf("Invalid parameters are specified\n");
1741                 return;
1742         }
1743
1744         if (rte_eth_dev_attach(identifier, &pi))
1745                 return;
1746
1747         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1748         /* if socket_id is invalid, set to 0 */
1749         if (check_socket_id(socket_id) < 0)
1750                 socket_id = 0;
1751         reconfig(pi, socket_id);
1752         rte_eth_promiscuous_enable(pi);
1753
1754         nb_ports = rte_eth_dev_count();
1755
1756         ports[pi].port_status = RTE_PORT_STOPPED;
1757
1758         printf("Port %d is attached. Number of ports is now %d\n", pi, nb_ports);
1759         printf("Done\n");
1760 }
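
/*
 * attach_port() backs the interactive "port attach <identifier>" command;
 * the identifier is whatever rte_eth_dev_attach() accepts, typically a PCI
 * address such as "0000:03:00.0" or a virtual device string such as
 * "net_pcap0,iface=eth0" (illustrative examples, not taken from this file).
 * The new port is left in RTE_PORT_STOPPED state so it can be configured
 * before being started.
 */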
1761
1762 void
1763 detach_port(portid_t port_id)
1764 {
1765         char name[RTE_ETH_NAME_MAX_LEN];
1766
1767         printf("Detaching a port...\n");
1768
1769         if (!port_is_closed(port_id)) {
1770                 printf("Please close port first\n");
1771                 return;
1772         }
1773
1774         if (ports[port_id].flow_list)
1775                 port_flow_flush(port_id);
1776
1777         if (rte_eth_dev_detach(port_id, name)) {
1778                 RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1779                 return;
1780         }
1781
1782         nb_ports = rte_eth_dev_count();
1783
1784         printf("Port '%s' is detached. Number of ports is now %d\n",
1785                         name, nb_ports);
1786         printf("Done\n");
1787         return;
1788 }
1789
1790 void
1791 pmd_test_exit(void)
1792 {
1793         portid_t pt_id;
1794
1795         if (test_done == 0)
1796                 stop_packet_forwarding();
1797
1798         if (ports != NULL) {
1799                 no_link_check = 1;
1800                 RTE_ETH_FOREACH_DEV(pt_id) {
1801                         printf("\nShutting down port %d...\n", pt_id);
1802                         fflush(stdout);
1803                         stop_port(pt_id);
1804                         close_port(pt_id);
1805                 }
1806         }
1807         printf("\nBye...\n");
1808 }
1809
1810 typedef void (*cmd_func_t)(void);
1811 struct pmd_test_command {
1812         const char *cmd_name;
1813         cmd_func_t cmd_func;
1814 };
1815
1816 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1817
1818 /* Check the link status of all ports in up to 9s, and print the final status */
1819 static void
1820 check_all_ports_link_status(uint32_t port_mask)
1821 {
1822 #define CHECK_INTERVAL 100 /* 100ms */
1823 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1824         portid_t portid;
1825         uint8_t count, all_ports_up, print_flag = 0;
1826         struct rte_eth_link link;
1827
1828         printf("Checking link statuses...\n");
1829         fflush(stdout);
1830         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1831                 all_ports_up = 1;
1832                 RTE_ETH_FOREACH_DEV(portid) {
1833                         if ((port_mask & (1 << portid)) == 0)
1834                                 continue;
1835                         memset(&link, 0, sizeof(link));
1836                         rte_eth_link_get_nowait(portid, &link);
1837                         /* print link status if flag set */
1838                         if (print_flag == 1) {
1839                                 if (link.link_status)
1840                                         printf(
1841                                         "Port %d Link Up. speed %u Mbps - %s\n",
1842                                         portid, link.link_speed,
1843                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1844                                         ("full-duplex") : ("half-duplex"));
1845                                 else
1846                                         printf("Port %d Link Down\n", portid);
1847                                 continue;
1848                         }
1849                         /* clear all_ports_up flag if any link down */
1850                         if (link.link_status == ETH_LINK_DOWN) {
1851                                 all_ports_up = 0;
1852                                 break;
1853                         }
1854                 }
1855                 /* once the final link status has been printed, get out */
1856                 if (print_flag == 1)
1857                         break;
1858
1859                 if (all_ports_up == 0) {
1860                         fflush(stdout);
1861                         rte_delay_ms(CHECK_INTERVAL);
1862                 }
1863
1864                 /* set the print_flag if all ports up or timeout */
1865                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1866                         print_flag = 1;
1867                 }
1868
1869                 if (lsc_interrupt)
1870                         break;
1871         }
1872 }
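
/*
 * The polling loop above wakes up every CHECK_INTERVAL (100 ms) for at most
 * MAX_CHECK_TIME iterations, i.e. roughly 90 * 100 ms = 9 s.  Once every
 * port selected by port_mask reports link up, or the timeout is about to
 * expire, print_flag is set and one last pass prints the status of each
 * port before the function returns.  When link-state-change interrupts are
 * enabled (lsc_interrupt), the loop exits after a single pass and the LSC
 * events are relied upon instead of polling.
 */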
1873
1874 static void
1875 rmv_event_callback(void *arg)
1876 {
1877         struct rte_eth_dev *dev;
1878         portid_t port_id = (intptr_t)arg;
1879
1880         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1881         dev = &rte_eth_devices[port_id];
1882
1883         stop_port(port_id);
1884         close_port(port_id);
1885         printf("removing device %s\n", dev->device->name);
1886         if (rte_eal_dev_detach(dev->device))
1887                 RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
1888                         dev->device->name);
1889 }
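
/*
 * rmv_event_callback() is not invoked directly from the device-removal
 * event: eth_event_callback() below schedules it through rte_eal_alarm_set()
 * with a 100000 us (100 ms) delay, so the stop/close/detach sequence runs
 * from the alarm context rather than from within the event delivery path.
 */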
1890
1891 /* This function is used by the interrupt thread */
1892 static int
1893 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1894                   void *ret_param)
1895 {
1896         static const char * const event_desc[] = {
1897                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1898                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1899                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1900                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1901                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1902                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1903                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1904                 [RTE_ETH_EVENT_MAX] = NULL,
1905         };
1906
1907         RTE_SET_USED(param);
1908         RTE_SET_USED(ret_param);
1909
1910         if (type >= RTE_ETH_EVENT_MAX) {
1911                 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1912                         port_id, __func__, type);
1913                 fflush(stderr);
1914         } else if (event_print_mask & (UINT32_C(1) << type)) {
1915                 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1916                         event_desc[type]);
1917                 fflush(stdout);
1918         }
1919
1920         switch (type) {
1921         case RTE_ETH_EVENT_INTR_RMV:
1922                 if (rte_eal_alarm_set(100000,
1923                                 rmv_event_callback, (void *)(intptr_t)port_id))
1924                         fprintf(stderr, "Could not set up deferred device removal\n");
1925                 break;
1926         default:
1927                 break;
1928         }
1929         return 0;
1930 }
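
/*
 * start_port() registers this callback for every rte_eth_event_type.  An
 * application interested only in link-state changes on a single port could
 * do the same with one call (sketch, assuming "pid" holds a valid port id):
 *
 *	if (rte_eth_dev_callback_register(pid, RTE_ETH_EVENT_INTR_LSC,
 *					  eth_event_callback, NULL) != 0)
 *		printf("cannot register LSC callback on port %u\n", pid);
 *
 * Whether an event is printed here is controlled by event_print_mask, a
 * bitmask indexed by the event type.
 */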
1931
1932 static int
1933 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1934 {
1935         uint16_t i;
1936         int diag;
1937         uint8_t mapping_found = 0;
1938
1939         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1940                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1941                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1942                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1943                                         tx_queue_stats_mappings[i].queue_id,
1944                                         tx_queue_stats_mappings[i].stats_counter_id);
1945                         if (diag != 0)
1946                                 return diag;
1947                         mapping_found = 1;
1948                 }
1949         }
1950         if (mapping_found)
1951                 port->tx_queue_stats_mapping_enabled = 1;
1952         return 0;
1953 }
1954
1955 static int
1956 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1957 {
1958         uint16_t i;
1959         int diag;
1960         uint8_t mapping_found = 0;
1961
1962         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1963                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1964                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1965                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1966                                         rx_queue_stats_mappings[i].queue_id,
1967                                         rx_queue_stats_mappings[i].stats_counter_id);
1968                         if (diag != 0)
1969                                 return diag;
1970                         mapping_found = 1;
1971                 }
1972         }
1973         if (mapping_found)
1974                 port->rx_queue_stats_mapping_enabled = 1;
1975         return 0;
1976 }
1977
1978 static void
1979 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
1980 {
1981         int diag = 0;
1982
1983         diag = set_tx_queue_stats_mapping_registers(pi, port);
1984         if (diag != 0) {
1985                 if (diag == -ENOTSUP) {
1986                         port->tx_queue_stats_mapping_enabled = 0;
1987                         printf("TX queue stats mapping not supported for port id=%d\n", pi);
1988                 }
1989                 else
1990                         rte_exit(EXIT_FAILURE,
1991                                         "set_tx_queue_stats_mapping_registers "
1992                                         "failed for port id=%d diag=%d\n",
1993                                         pi, diag);
1994         }
1995
1996         diag = set_rx_queue_stats_mapping_registers(pi, port);
1997         if (diag != 0) {
1998                 if (diag == -ENOTSUP) {
1999                         port->rx_queue_stats_mapping_enabled = 0;
2000                         printf("RX queue stats mapping not supported for port id=%d\n", pi);
2001                 }
2002                 else
2003                         rte_exit(EXIT_FAILURE,
2004                                         "set_rx_queue_stats_mapping_registers "
2005                                         "failed for port id=%d diag=%d\n",
2006                                         pi, diag);
2007         }
2008 }
2009
2010 static void
2011 rxtx_port_config(struct rte_port *port)
2012 {
2013         port->rx_conf = port->dev_info.default_rxconf;
2014         port->tx_conf = port->dev_info.default_txconf;
2015
2016         /* Check if any RX/TX parameters have been passed */
2017         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2018                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2019
2020         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2021                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2022
2023         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2024                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2025
2026         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2027                 port->rx_conf.rx_free_thresh = rx_free_thresh;
2028
2029         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2030                 port->rx_conf.rx_drop_en = rx_drop_en;
2031
2032         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2033                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2034
2035         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2036                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2037
2038         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2039                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2040
2041         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2042                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2043
2044         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2045                 port->tx_conf.tx_free_thresh = tx_free_thresh;
2046
2047         if (txq_flags != RTE_PMD_PARAM_UNSET)
2048                 port->tx_conf.txq_flags = txq_flags;
2049 }
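
/*
 * All of the threshold variables above default to RTE_PMD_PARAM_UNSET, so a
 * field in rx_conf/tx_conf is only overridden when the matching command-line
 * option (e.g. --rxfreet, --txfreet, --txqflags) was supplied; otherwise the
 * PMD defaults obtained from dev_info are kept unchanged.
 */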
2050
2051 void
2052 init_port_config(void)
2053 {
2054         portid_t pid;
2055         struct rte_port *port;
2056
2057         RTE_ETH_FOREACH_DEV(pid) {
2058                 port = &ports[pid];
2059                 port->dev_conf.rxmode = rx_mode;
2060                 port->dev_conf.fdir_conf = fdir_conf;
2061                 if (nb_rxq > 1) {
2062                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2063                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2064                 } else {
2065                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2066                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2067                 }
2068
2069                 if (port->dcb_flag == 0) {
2070                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2071                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2072                         else
2073                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2074                 }
2075
2076                 rxtx_port_config(port);
2077
2078                 rte_eth_macaddr_get(pid, &port->eth_addr);
2079
2080                 map_port_queue_stats_mapping_registers(pid, port);
2081 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2082                 rte_pmd_ixgbe_bypass_init(pid);
2083 #endif
2084
2085                 if (lsc_interrupt &&
2086                     (rte_eth_devices[pid].data->dev_flags &
2087                      RTE_ETH_DEV_INTR_LSC))
2088                         port->dev_conf.intr_conf.lsc = 1;
2089                 if (rmv_interrupt &&
2090                     (rte_eth_devices[pid].data->dev_flags &
2091                      RTE_ETH_DEV_INTR_RMV))
2092                         port->dev_conf.intr_conf.rmv = 1;
2093
2094 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2095                 /* Detect softnic port */
2096                 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2097                         port->softnic_enable = 1;
2098                         memset(&port->softport, 0, sizeof(struct softnic_port));
2099
2100                         if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2101                                 port->softport.tm_flag = 1;
2102                 }
2103 #endif
2104         }
2105 }
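
/*
 * RSS is only enabled when more than one RX queue was requested: with
 * nb_rxq <= 1 the hash functions are cleared and, unless DCB is configured
 * on the port, the multi-queue mode falls back to ETH_MQ_RX_NONE.
 */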
2106
2107 void set_port_slave_flag(portid_t slave_pid)
2108 {
2109         struct rte_port *port;
2110
2111         port = &ports[slave_pid];
2112         port->slave_flag = 1;
2113 }
2114
2115 void clear_port_slave_flag(portid_t slave_pid)
2116 {
2117         struct rte_port *port;
2118
2119         port = &ports[slave_pid];
2120         port->slave_flag = 0;
2121 }
2122
2123 uint8_t port_is_bonding_slave(portid_t slave_pid)
2124 {
2125         struct rte_port *port;
2126
2127         port = &ports[slave_pid];
2128         return port->slave_flag;
2129 }
2130
2131 const uint16_t vlan_tags[] = {
2132                 0,  1,  2,  3,  4,  5,  6,  7,
2133                 8,  9, 10, 11,  12, 13, 14, 15,
2134                 16, 17, 18, 19, 20, 21, 22, 23,
2135                 24, 25, 26, 27, 28, 29, 30, 31
2136 };
2137
2138 static  int
2139 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2140                  enum dcb_mode_enable dcb_mode,
2141                  enum rte_eth_nb_tcs num_tcs,
2142                  uint8_t pfc_en)
2143 {
2144         uint8_t i;
2145
2146         /*
2147          * Builds up the correct configuration for dcb+vt based on the vlan tags array
2148          * given above, and the number of traffic classes available for use.
2149          */
2150         if (dcb_mode == DCB_VT_ENABLED) {
2151                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2152                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2153                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2154                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2155
2156                 /* VMDQ+DCB RX and TX configurations */
2157                 vmdq_rx_conf->enable_default_pool = 0;
2158                 vmdq_rx_conf->default_pool = 0;
2159                 vmdq_rx_conf->nb_queue_pools =
2160                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2161                 vmdq_tx_conf->nb_queue_pools =
2162                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2163
2164                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2165                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2166                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2167                         vmdq_rx_conf->pool_map[i].pools =
2168                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
2169                 }
2170                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2171                         vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2172                         vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2173                 }
2174
2175                 /* set DCB mode of RX and TX of multiple queues */
2176                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2177                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2178         } else {
2179                 struct rte_eth_dcb_rx_conf *rx_conf =
2180                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
2181                 struct rte_eth_dcb_tx_conf *tx_conf =
2182                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
2183
2184                 rx_conf->nb_tcs = num_tcs;
2185                 tx_conf->nb_tcs = num_tcs;
2186
2187                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2188                         rx_conf->dcb_tc[i] = i % num_tcs;
2189                         tx_conf->dcb_tc[i] = i % num_tcs;
2190                 }
2191                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2192                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2193                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2194         }
2195
2196         if (pfc_en)
2197                 eth_conf->dcb_capability_en =
2198                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2199         else
2200                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2201
2202         return 0;
2203 }
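
/*
 * Worked example for the VT+DCB branch with num_tcs == ETH_4_TCS: the pool
 * count becomes ETH_32_POOLS, so pool_map[i] maps vlan_tags[i] to pool bit
 * (1 << (i % 32)) -- VLAN 0 to pool 0, VLAN 1 to pool 1, and so on -- while
 * the eight user priorities are folded onto the four traffic classes as
 * dcb_tc[i] = i % 4 (priorities 0 and 4 share TC 0, 1 and 5 share TC 1, ...).
 */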
2204
2205 int
2206 init_port_dcb_config(portid_t pid,
2207                      enum dcb_mode_enable dcb_mode,
2208                      enum rte_eth_nb_tcs num_tcs,
2209                      uint8_t pfc_en)
2210 {
2211         struct rte_eth_conf port_conf;
2212         struct rte_port *rte_port;
2213         int retval;
2214         uint16_t i;
2215
2216         rte_port = &ports[pid];
2217
2218         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2219         /* Enter DCB configuration status */
2220         dcb_config = 1;
2221
2222         /* set configuration of DCB in VT mode and DCB in non-VT mode */
2223         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2224         if (retval < 0)
2225                 return retval;
2226         port_conf.rxmode.hw_vlan_filter = 1;
2227
2228         /**
2229          * Write the configuration into the device.
2230          * Set the numbers of RX & TX queues to 0, so
2231          * the RX & TX queues will not be setup.
2232          */
2233         rte_eth_dev_configure(pid, 0, 0, &port_conf);
2234
2235         rte_eth_dev_info_get(pid, &rte_port->dev_info);
2236
2237         /* If dev_info.vmdq_pool_base is greater than 0,
2238          * the queue IDs of the VMDQ pools start after the PF queues.
2239          */
2240         if (dcb_mode == DCB_VT_ENABLED &&
2241             rte_port->dev_info.vmdq_pool_base > 0) {
2242                 printf("VMDQ_DCB multi-queue mode is nonsensical"
2243                         " for port %d.\n", pid);
2244                 return -1;
2245         }
2246
2247         /* Assume the ports in testpmd have the same DCB capability
2248          * and the same number of rxq and txq in DCB mode
2249          */
2250         if (dcb_mode == DCB_VT_ENABLED) {
2251                 if (rte_port->dev_info.max_vfs > 0) {
2252                         nb_rxq = rte_port->dev_info.nb_rx_queues;
2253                         nb_txq = rte_port->dev_info.nb_tx_queues;
2254                 } else {
2255                         nb_rxq = rte_port->dev_info.max_rx_queues;
2256                         nb_txq = rte_port->dev_info.max_tx_queues;
2257                 }
2258         } else {
2259                 /* if VT is disabled, use all PF queues */
2260                 if (rte_port->dev_info.vmdq_pool_base == 0) {
2261                         nb_rxq = rte_port->dev_info.max_rx_queues;
2262                         nb_txq = rte_port->dev_info.max_tx_queues;
2263                 } else {
2264                         nb_rxq = (queueid_t)num_tcs;
2265                         nb_txq = (queueid_t)num_tcs;
2266
2267                 }
2268         }
2269         rx_free_thresh = 64;
2270
2271         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2272
2273         rxtx_port_config(rte_port);
2274         /* VLAN filter */
2275         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2276         for (i = 0; i < RTE_DIM(vlan_tags); i++)
2277                 rx_vft_set(pid, vlan_tags[i], 1);
2278
2279         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2280         map_port_queue_stats_mapping_registers(pid, rte_port);
2281
2282         rte_port->dcb_flag = 1;
2283
2284         return 0;
2285 }
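
/*
 * init_port_dcb_config() is reached from the interactive
 * "port config <port-id> dcb vt (on|off) <tc-num> pfc (on|off)" command on a
 * stopped port; the next "port start" then applies the updated nb_rxq/nb_txq
 * and VLAN filter settings through the regular start_port() path.
 */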
2286
2287 static void
2288 init_port(void)
2289 {
2290         /* Configuration of Ethernet ports. */
2291         ports = rte_zmalloc("testpmd: ports",
2292                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2293                             RTE_CACHE_LINE_SIZE);
2294         if (ports == NULL) {
2295                 rte_exit(EXIT_FAILURE,
2296                                 "rte_zmalloc(%d struct rte_port) failed\n",
2297                                 RTE_MAX_ETHPORTS);
2298         }
2299 }
2300
2301 static void
2302 force_quit(void)
2303 {
2304         pmd_test_exit();
2305         prompt_exit();
2306 }
2307
2308 static void
2309 print_stats(void)
2310 {
2311         uint8_t i;
2312         const char clr[] = { 27, '[', '2', 'J', '\0' };
2313         const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
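        /*
         * clr and top_left are ANSI escape sequences: ESC "[2J" erases the
         * display and ESC "[1;1H" moves the cursor to row 1, column 1
         * (27 is the ASCII ESC character).
         */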
2314
2315         /* Clear screen and move to top left */
2316         printf("%s%s", clr, top_left);
2317
2318         printf("\nPort statistics ====================================");
2319         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2320                 nic_stats_display(fwd_ports_ids[i]);
2321 }
2322
2323 static void
2324 signal_handler(int signum)
2325 {
2326         if (signum == SIGINT || signum == SIGTERM) {
2327                 printf("\nSignal %d received, preparing to exit...\n",
2328                                 signum);
2329 #ifdef RTE_LIBRTE_PDUMP
2330                 /* uninitialize packet capture framework */
2331                 rte_pdump_uninit();
2332 #endif
2333 #ifdef RTE_LIBRTE_LATENCY_STATS
2334                 rte_latencystats_uninit();
2335 #endif
2336                 force_quit();
2337                 /* Set flag to indicate forced termination. */
2338                 f_quit = 1;
2339                 /* exit with the expected status */
2340                 signal(signum, SIG_DFL);
2341                 kill(getpid(), signum);
2342         }
2343 }
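
/*
 * Restoring SIG_DFL and re-raising the signal above makes the process
 * terminate with the conventional "killed by signal" status once cleanup is
 * done, instead of returning 0 from main().
 */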
2344
2345 int
2346 main(int argc, char** argv)
2347 {
2348         int  diag;
2349         portid_t port_id;
2350
2351         signal(SIGINT, signal_handler);
2352         signal(SIGTERM, signal_handler);
2353
2354         diag = rte_eal_init(argc, argv);
2355         if (diag < 0)
2356                 rte_panic("Cannot init EAL\n");
2357
2358         if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2359                 RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
2360                         strerror(errno));
2361         }
2362
2363 #ifdef RTE_LIBRTE_PDUMP
2364         /* initialize packet capture framework */
2365         rte_pdump_init(NULL);
2366 #endif
2367
2368         nb_ports = (portid_t) rte_eth_dev_count();
2369         if (nb_ports == 0)
2370                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2371
2372         /* allocate port structures, and init them */
2373         init_port();
2374
2375         set_def_fwd_config();
2376         if (nb_lcores == 0)
2377                 rte_panic("Empty set of forwarding logical cores - check the "
2378                           "core mask supplied in the command parameters\n");
2379
2380         /* Bitrate/latency stats disabled by default */
2381 #ifdef RTE_LIBRTE_BITRATE
2382         bitrate_enabled = 0;
2383 #endif
2384 #ifdef RTE_LIBRTE_LATENCY_STATS
2385         latencystats_enabled = 0;
2386 #endif
2387
2388         argc -= diag;
2389         argv += diag;
2390         if (argc > 1)
2391                 launch_args_parse(argc, argv);
2392
2393         if (tx_first && interactive)
2394                 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2395                                 "interactive mode.\n");
2396
2397         if (tx_first && lsc_interrupt) {
2398                 printf("Warning: lsc_interrupt needs to be off when "
2399                                 "using tx_first. Disabling.\n");
2400                 lsc_interrupt = 0;
2401         }
2402
2403         if (!nb_rxq && !nb_txq)
2404                 printf("Warning: Either rx or tx queues should be non-zero\n");
2405
2406         if (nb_rxq > 1 && nb_rxq > nb_txq)
2407                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2408                        "but nb_txq=%d will prevent RSS from being fully tested.\n",
2409                        nb_rxq, nb_txq);
2410
2411         init_config();
2412         if (start_port(RTE_PORT_ALL) != 0)
2413                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2414
2415         /* set all ports to promiscuous mode by default */
2416         RTE_ETH_FOREACH_DEV(port_id)
2417                 rte_eth_promiscuous_enable(port_id);
2418
2419         /* Init metrics library */
2420         rte_metrics_init(rte_socket_id());
2421
2422 #ifdef RTE_LIBRTE_LATENCY_STATS
2423         if (latencystats_enabled != 0) {
2424                 int ret = rte_latencystats_init(1, NULL);
2425                 if (ret)
2426                         printf("Warning: latencystats init()"
2427                                 " returned error %d\n", ret);
2428                 printf("Latencystats running on lcore %d\n",
2429                         latencystats_lcore_id);
2430         }
2431 #endif
2432
2433         /* Setup bitrate stats */
2434 #ifdef RTE_LIBRTE_BITRATE
2435         if (bitrate_enabled != 0) {
2436                 bitrate_data = rte_stats_bitrate_create();
2437                 if (bitrate_data == NULL)
2438                         rte_exit(EXIT_FAILURE,
2439                                 "Could not allocate bitrate data.\n");
2440                 rte_stats_bitrate_reg(bitrate_data);
2441         }
2442 #endif
2443
2444 #ifdef RTE_LIBRTE_CMDLINE
2445         if (strlen(cmdline_filename) != 0)
2446                 cmdline_read_from_file(cmdline_filename);
2447
2448         if (interactive == 1) {
2449                 if (auto_start) {
2450                         printf("Start automatic packet forwarding\n");
2451                         start_packet_forwarding(0);
2452                 }
2453                 prompt();
2454                 pmd_test_exit();
2455         } else
2456 #endif
2457         {
2458                 char c;
2459                 int rc;
2460
2461                 f_quit = 0;
2462
2463                 printf("No commandline core given, start packet forwarding\n");
2464                 start_packet_forwarding(tx_first);
2465                 if (stats_period != 0) {
2466                         uint64_t prev_time = 0, cur_time, diff_time = 0;
2467                         uint64_t timer_period;
2468
2469                         /* Convert to number of cycles */
2470                         timer_period = stats_period * rte_get_timer_hz();
2471
2472                         while (f_quit == 0) {
2473                                 cur_time = rte_get_timer_cycles();
2474                                 diff_time += cur_time - prev_time;
2475
2476                                 if (diff_time >= timer_period) {
2477                                         print_stats();
2478                                         /* Reset the timer */
2479                                         diff_time = 0;
2480                                 }
2481                                 /* Sleep to avoid unnecessary checks */
2482                                 prev_time = cur_time;
2483                                 sleep(1);
2484                         }
2485                 }
2486
2487                 printf("Press enter to exit\n");
2488                 rc = read(0, &c, 1);
2489                 pmd_test_exit();
2490                 if (rc < 0)
2491                         return 1;
2492         }
2493
2494         return 0;
2495 }