app/testpmd: enable fast free Tx offload by default
[dpdk.git] / app/test-pmd/testpmd.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <errno.h>
15
16 #include <sys/queue.h>
17 #include <sys/stat.h>
18
19 #include <stdint.h>
20 #include <unistd.h>
21 #include <inttypes.h>
22
23 #include <rte_common.h>
24 #include <rte_errno.h>
25 #include <rte_byteorder.h>
26 #include <rte_log.h>
27 #include <rte_debug.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
30 #include <rte_memcpy.h>
31 #include <rte_launch.h>
32 #include <rte_eal.h>
33 #include <rte_alarm.h>
34 #include <rte_per_lcore.h>
35 #include <rte_lcore.h>
36 #include <rte_atomic.h>
37 #include <rte_branch_prediction.h>
38 #include <rte_mempool.h>
39 #include <rte_malloc.h>
40 #include <rte_mbuf.h>
41 #include <rte_interrupts.h>
42 #include <rte_pci.h>
43 #include <rte_ether.h>
44 #include <rte_ethdev.h>
45 #include <rte_dev.h>
46 #include <rte_string_fns.h>
47 #ifdef RTE_LIBRTE_IXGBE_PMD
48 #include <rte_pmd_ixgbe.h>
49 #endif
50 #ifdef RTE_LIBRTE_PDUMP
51 #include <rte_pdump.h>
52 #endif
53 #include <rte_flow.h>
54 #include <rte_metrics.h>
55 #ifdef RTE_LIBRTE_BITRATE
56 #include <rte_bitrate.h>
57 #endif
58 #ifdef RTE_LIBRTE_LATENCY_STATS
59 #include <rte_latencystats.h>
60 #endif
61
62 #include "testpmd.h"
63
64 uint16_t verbose_level = 0; /**< Silent by default. */
65 int testpmd_logtype; /**< Log type for testpmd logs */
66
67 /* Use the master core for the command line? */
68 uint8_t interactive = 0;
69 uint8_t auto_start = 0;
70 uint8_t tx_first;
71 char cmdline_filename[PATH_MAX] = {0};
72
73 /*
74  * NUMA support configuration.
75  * When set, the NUMA support attempts to dispatch the allocation of the
76  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
77  * probed ports among the CPU sockets 0 and 1.
78  * Otherwise, all memory is allocated from CPU socket 0.
79  */
80 uint8_t numa_support = 1; /**< numa enabled by default */
81
82 /*
83  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
84  * not configured.
85  */
86 uint8_t socket_num = UMA_NO_CONFIG;
87
88 /*
89  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
90  */
91 uint8_t mp_anon = 0;
92
93 /*
94  * Record the Ethernet address of peer target ports to which packets are
95  * forwarded.
96  * Must be instantiated with the ethernet addresses of peer traffic generator
97  * ports.
98  */
99 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
100 portid_t nb_peer_eth_addrs = 0;
101
102 /*
103  * Probed Target Environment.
104  */
105 struct rte_port *ports;        /**< For all probed ethernet ports. */
106 portid_t nb_ports;             /**< Number of probed ethernet ports. */
107 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
108 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
109
110 /*
111  * Test Forwarding Configuration.
112  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
113  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
114  */
115 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
116 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
117 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
118 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
119
120 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
121 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
122
123 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
124 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
125
126 /*
127  * Forwarding engines.
128  */
129 struct fwd_engine * fwd_engines[] = {
130         &io_fwd_engine,
131         &mac_fwd_engine,
132         &mac_swap_engine,
133         &flow_gen_engine,
134         &rx_only_engine,
135         &tx_only_engine,
136         &csum_fwd_engine,
137         &icmp_echo_engine,
138 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
139         &softnic_tm_engine,
140         &softnic_tm_bypass_engine,
141 #endif
142 #ifdef RTE_LIBRTE_IEEE1588
143         &ieee1588_fwd_engine,
144 #endif
145         NULL,
146 };
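/*
 * The NULL entry above terminates the engine list. The active engine is
 * selected at run time (for example with testpmd's interactive
 * "set fwd <mode>" command or the --forward-mode option) and defaults to the
 * io engine via cur_fwd_eng below.
 */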
147
148 struct fwd_config cur_fwd_config;
149 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
150 uint32_t retry_enabled;
151 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
152 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
153
154 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
155 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
156                                       * specified on command-line. */
157 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
158
159 /*
160  * In a container, the process running with the 'stats-period' option cannot be
161  * terminated. Set a flag to exit the stats-period loop after SIGINT/SIGTERM is received.
162  */
163 uint8_t f_quit;
164
165 /*
166  * Configuration of packet segments used by the "txonly" processing engine.
167  */
168 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
169 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
170         TXONLY_DEF_PACKET_LEN,
171 };
172 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
173
174 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
175 /**< Split policy for packets to TX. */
176
177 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
178 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
179
180 /* Whether the current configuration is in DCB mode; 0 means it is not */
181 uint8_t dcb_config = 0;
182
183 /* Whether DCB is currently under test */
184 uint8_t dcb_test = 0;
185
186 /*
187  * Configurable number of RX/TX queues.
188  */
189 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
190 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
191
192 /*
193  * Configurable number of RX/TX ring descriptors.
194  */
195 #define RTE_TEST_RX_DESC_DEFAULT 128
196 #define RTE_TEST_TX_DESC_DEFAULT 512
197 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
198 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
199
200 #define RTE_PMD_PARAM_UNSET -1
201 /*
202  * Configurable values of RX and TX ring threshold registers.
203  */
204
205 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
206 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
207 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
208
209 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
210 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
211 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
212
213 /*
214  * Configurable value of RX free threshold.
215  */
216 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
217
218 /*
219  * Configurable value of RX drop enable.
220  */
221 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
222
223 /*
224  * Configurable value of TX free threshold.
225  */
226 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
227
228 /*
229  * Configurable value of TX RS bit threshold.
230  */
231 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
232
233 /*
234  * Receive Side Scaling (RSS) configuration.
235  */
236 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
237
238 /*
239  * Port topology configuration
240  */
241 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
242
243 /*
244  * Avoid flushing all the RX streams before starting forwarding.
245  */
246 uint8_t no_flush_rx = 0; /* flush by default */
247
248 /*
249  * Flow API isolated mode.
250  */
251 uint8_t flow_isolate_all;
252
253 /*
254  * Avoid checking the link status when starting/stopping a port.
255  */
256 uint8_t no_link_check = 0; /* check by default */
257
258 /*
259  * Enable link status change notification
260  */
261 uint8_t lsc_interrupt = 1; /* enabled by default */
262
263 /*
264  * Enable device removal notification.
265  */
266 uint8_t rmv_interrupt = 1; /* enabled by default */
267
268 /*
269  * Display or mask Ethernet events.
270  * Defaults to all events except VF_MBOX.
271  */
272 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
273                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
274                             (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
275                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
276                             (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
277                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
278
279 /*
280  * NIC bypass mode configuration options.
281  */
282
283 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
284 /* The NIC bypass watchdog timeout. */
285 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
286 #endif
287
288
289 #ifdef RTE_LIBRTE_LATENCY_STATS
290
291 /*
292  * Set when latency stats are enabled on the command line.
293  */
294 uint8_t latencystats_enabled;
295
296 /*
297  * Lcore ID to service latency statistics.
298  */
299 lcoreid_t latencystats_lcore_id = -1;
300
301 #endif
302
303 /*
304  * Ethernet device configuration.
305  */
306 struct rte_eth_rxmode rx_mode = {
307         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
308         .offloads = (DEV_RX_OFFLOAD_VLAN_FILTER |
309                      DEV_RX_OFFLOAD_VLAN_STRIP |
310                      DEV_RX_OFFLOAD_CRC_STRIP),
311         .ignore_offload_bitfield = 1,
312 };
313
314 struct rte_eth_txmode tx_mode = {
315         .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
316 };
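/*
 * Fast mbuf free is requested by default in tx_mode above; init_config()
 * clears DEV_TX_OFFLOAD_MBUF_FAST_FREE again for any port whose PMD does not
 * advertise the capability in dev_info.tx_offload_capa. When the offload is
 * left enabled, all mbufs sent on a given Tx queue must come from the same
 * mempool and must not be shared (reference count of 1).
 */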
317
318 struct rte_fdir_conf fdir_conf = {
319         .mode = RTE_FDIR_MODE_NONE,
320         .pballoc = RTE_FDIR_PBALLOC_64K,
321         .status = RTE_FDIR_REPORT_STATUS,
322         .mask = {
323                 .vlan_tci_mask = 0x0,
324                 .ipv4_mask     = {
325                         .src_ip = 0xFFFFFFFF,
326                         .dst_ip = 0xFFFFFFFF,
327                 },
328                 .ipv6_mask     = {
329                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
330                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
331                 },
332                 .src_port_mask = 0xFFFF,
333                 .dst_port_mask = 0xFFFF,
334                 .mac_addr_byte_mask = 0xFF,
335                 .tunnel_type_mask = 1,
336                 .tunnel_id_mask = 0xFFFFFFFF,
337         },
338         .drop_queue = 127,
339 };
340
341 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
342
343 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
344 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
345
346 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
347 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
348
349 uint16_t nb_tx_queue_stats_mappings = 0;
350 uint16_t nb_rx_queue_stats_mappings = 0;
351
352 /*
353  * Display zero values for xstats by default (xstats_hide_zero is unset).
354  */
355 uint8_t xstats_hide_zero;
356
357 unsigned int num_sockets = 0;
358 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
359
360 #ifdef RTE_LIBRTE_BITRATE
361 /* Bitrate statistics */
362 struct rte_stats_bitrates *bitrate_data;
363 lcoreid_t bitrate_lcore_id;
364 uint8_t bitrate_enabled;
365 #endif
366
367 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
368 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
369
370 /* Forward function declarations */
371 static void map_port_queue_stats_mapping_registers(portid_t pi,
372                                                    struct rte_port *port);
373 static void check_all_ports_link_status(uint32_t port_mask);
374 static int eth_event_callback(portid_t port_id,
375                               enum rte_eth_event_type type,
376                               void *param, void *ret_param);
377
378 /*
379  * Check if all the ports are started.
380  * If yes, return positive value. If not, return zero.
381  */
382 static int all_ports_started(void);
383
384 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
385 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
386
387 /*
388  * Helper function to check whether a socket has not been discovered yet.
389  * If it is new, return a positive value; if already known, return zero.
390  */
391 int
392 new_socket_id(unsigned int socket_id)
393 {
394         unsigned int i;
395
396         for (i = 0; i < num_sockets; i++) {
397                 if (socket_ids[i] == socket_id)
398                         return 0;
399         }
400         return 1;
401 }
402
403 /*
404  * Setup default configuration.
405  */
406 static void
407 set_default_fwd_lcores_config(void)
408 {
409         unsigned int i;
410         unsigned int nb_lc;
411         unsigned int sock_num;
412
413         nb_lc = 0;
414         for (i = 0; i < RTE_MAX_LCORE; i++) {
415                 sock_num = rte_lcore_to_socket_id(i);
416                 if (new_socket_id(sock_num)) {
417                         if (num_sockets >= RTE_MAX_NUMA_NODES) {
418                                 rte_exit(EXIT_FAILURE,
419                                          "Total sockets greater than %u\n",
420                                          RTE_MAX_NUMA_NODES);
421                         }
422                         socket_ids[num_sockets++] = sock_num;
423                 }
424                 if (!rte_lcore_is_enabled(i))
425                         continue;
426                 if (i == rte_get_master_lcore())
427                         continue;
428                 fwd_lcores_cpuids[nb_lc++] = i;
429         }
430         nb_lcores = (lcoreid_t) nb_lc;
431         nb_cfg_lcores = nb_lcores;
432         nb_fwd_lcores = 1;
433 }
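/*
 * Note: set_default_fwd_lcores_config() above also discovers the CPU sockets
 * in use: the socket of every lcore index is recorded in socket_ids[], while
 * the master lcore and disabled lcores are excluded from the forwarding list.
 */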
434
435 static void
436 set_def_peer_eth_addrs(void)
437 {
438         portid_t i;
439
440         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
441                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
442                 peer_eth_addrs[i].addr_bytes[5] = i;
443         }
444 }
445
446 static void
447 set_default_fwd_ports_config(void)
448 {
449         portid_t pt_id;
450         int i = 0;
451
452         RTE_ETH_FOREACH_DEV(pt_id)
453                 fwd_ports_ids[i++] = pt_id;
454
455         nb_cfg_ports = nb_ports;
456         nb_fwd_ports = nb_ports;
457 }
458
459 void
460 set_def_fwd_config(void)
461 {
462         set_default_fwd_lcores_config();
463         set_def_peer_eth_addrs();
464         set_default_fwd_ports_config();
465 }
466
467 /*
468  * Configuration initialisation done once at init time.
469  */
470 static void
471 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
472                  unsigned int socket_id)
473 {
474         char pool_name[RTE_MEMPOOL_NAMESIZE];
475         struct rte_mempool *rte_mp = NULL;
476         uint32_t mb_size;
477
478         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
479         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
480
481         TESTPMD_LOG(INFO,
482                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
483                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
484
485         if (mp_anon != 0) {
486                 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
487                         mb_size, (unsigned) mb_mempool_cache,
488                         sizeof(struct rte_pktmbuf_pool_private),
489                         socket_id, 0);
490                 if (rte_mp == NULL)
491                         goto err;
492
493                 if (rte_mempool_populate_anon(rte_mp) == 0) {
494                         rte_mempool_free(rte_mp);
495                         rte_mp = NULL;
496                         goto err;
497                 }
498                 rte_pktmbuf_pool_init(rte_mp, NULL);
499                 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
500         } else {
501                 /* wrapper to rte_mempool_create() */
502                 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
503                         mb_mempool_cache, 0, mbuf_seg_size, socket_id);
504         }
505
506 err:
507         if (rte_mp == NULL) {
508                 rte_exit(EXIT_FAILURE,
509                         "Creation of mbuf pool for socket %u failed: %s\n",
510                         socket_id, rte_strerror(rte_errno));
511         } else if (verbose_level > 0) {
512                 rte_mempool_dump(stdout, rte_mp);
513         }
514 }
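/*
 * Both pool creation paths above converge on the err label: failing to create
 * the pool is fatal (rte_exit), while a successfully created pool is dumped
 * to stdout when verbose mode is enabled.
 */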
515
516 /*
517  * Check whether the given socket ID is valid in NUMA mode.
518  * Return 0 if valid, -1 otherwise.
519  */
520 static int
521 check_socket_id(const unsigned int socket_id)
522 {
523         static int warning_once = 0;
524
525         if (new_socket_id(socket_id)) {
526                 if (!warning_once && numa_support)
527                         printf("Warning: NUMA should be configured manually by"
528                                " using --port-numa-config and"
529                                " --ring-numa-config parameters along with"
530                                " --numa.\n");
531                 warning_once = 1;
532                 return -1;
533         }
534         return 0;
535 }
536
537 static void
538 init_config(void)
539 {
540         portid_t pid;
541         struct rte_port *port;
542         struct rte_mempool *mbp;
543         unsigned int nb_mbuf_per_pool;
544         lcoreid_t  lc_id;
545         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
546         struct rte_gro_param gro_param;
547         uint32_t gso_types;
548
549         memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
550
551         if (numa_support) {
552                 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
553                 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
554                 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
555         }
556
557         /* Configuration of logical cores. */
558         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
559                                 sizeof(struct fwd_lcore *) * nb_lcores,
560                                 RTE_CACHE_LINE_SIZE);
561         if (fwd_lcores == NULL) {
562                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
563                                                         "failed\n", nb_lcores);
564         }
565         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
566                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
567                                                sizeof(struct fwd_lcore),
568                                                RTE_CACHE_LINE_SIZE);
569                 if (fwd_lcores[lc_id] == NULL) {
570                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
571                                                                 "failed\n");
572                 }
573                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
574         }
575
576         RTE_ETH_FOREACH_DEV(pid) {
577                 port = &ports[pid];
578                 /* Apply default Tx configuration for all ports */
579                 port->dev_conf.txmode = tx_mode;
580                 port->dev_conf.rxmode = rx_mode;
581                 rte_eth_dev_info_get(pid, &port->dev_info);
582                 if (!(port->dev_info.tx_offload_capa &
583                       DEV_TX_OFFLOAD_MBUF_FAST_FREE))
584                         port->dev_conf.txmode.offloads &=
585                                 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
586
587                 if (numa_support) {
588                         if (port_numa[pid] != NUMA_NO_CONFIG)
589                                 port_per_socket[port_numa[pid]]++;
590                         else {
591                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
592
593                                 /* if socket_id is invalid, set to 0 */
594                                 if (check_socket_id(socket_id) < 0)
595                                         socket_id = 0;
596                                 port_per_socket[socket_id]++;
597                         }
598                 }
599
600                 /* set flag to initialize port/queue */
601                 port->need_reconfig = 1;
602                 port->need_reconfig_queues = 1;
603         }
604
605         /*
606          * Create mbuf pools.
607          * If NUMA support is disabled, create a single mbuf pool in
608          * socket 0 memory by default.
609          * Otherwise, create an mbuf pool in the memory of each detected socket.
610          *
611          * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd and
612          * nb_txd can be reconfigured at run time.
613          */
614         if (param_total_num_mbufs)
615                 nb_mbuf_per_pool = param_total_num_mbufs;
616         else {
617                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
618                         (nb_lcores * mb_mempool_cache) +
619                         RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
620                 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
621         }
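        /*
         * The default sizing above is a worst-case estimate: enough mbufs for
         * a maximum-sized Rx ring, a maximum-sized Tx ring, the per-lcore
         * mempool caches and one extra burst, all multiplied by the maximum
         * number of ports.
         */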
622
623         if (numa_support) {
624                 uint8_t i;
625
626                 for (i = 0; i < num_sockets; i++)
627                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
628                                          socket_ids[i]);
629         } else {
630                 if (socket_num == UMA_NO_CONFIG)
631                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
632                 else
633                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
634                                                  socket_num);
635         }
636
637         init_port_config();
638
639         gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
640                 DEV_TX_OFFLOAD_GRE_TNL_TSO;
641         /*
642          * Record which mbuf pool each logical core should use, if needed.
643          */
644         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
645                 mbp = mbuf_pool_find(
646                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
647
648                 if (mbp == NULL)
649                         mbp = mbuf_pool_find(0);
650                 fwd_lcores[lc_id]->mbp = mbp;
651                 /* initialize GSO context */
652                 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
653                 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
654                 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
655                 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
656                         ETHER_CRC_LEN;
657                 fwd_lcores[lc_id]->gso_ctx.flag = 0;
658         }
659
660         /* Configuration of packet forwarding streams. */
661         if (init_fwd_streams() < 0)
662                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
663
664         fwd_config_setup();
665
666         /* create a gro context for each lcore */
667         gro_param.gro_types = RTE_GRO_TCP_IPV4;
668         gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
669         gro_param.max_item_per_flow = MAX_PKT_BURST;
670         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
671                 gro_param.socket_id = rte_lcore_to_socket_id(
672                                 fwd_lcores_cpuids[lc_id]);
673                 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
674                 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
675                         rte_exit(EXIT_FAILURE,
676                                         "rte_gro_ctx_create() failed\n");
677                 }
678         }
679 }
680
681
682 void
683 reconfig(portid_t new_port_id, unsigned socket_id)
684 {
685         struct rte_port *port;
686
687         /* Reconfiguration of Ethernet ports. */
688         port = &ports[new_port_id];
689         rte_eth_dev_info_get(new_port_id, &port->dev_info);
690
691         /* set flag to initialize port/queue */
692         port->need_reconfig = 1;
693         port->need_reconfig_queues = 1;
694         port->socket_id = socket_id;
695
696         init_port_config();
697 }
698
699
700 int
701 init_fwd_streams(void)
702 {
703         portid_t pid;
704         struct rte_port *port;
705         streamid_t sm_id, nb_fwd_streams_new;
706         queueid_t q;
707
708         /* Set the socket ID of each port according to whether NUMA is enabled */
709         RTE_ETH_FOREACH_DEV(pid) {
710                 port = &ports[pid];
711                 if (nb_rxq > port->dev_info.max_rx_queues) {
712                         printf("Fail: nb_rxq(%d) is greater than "
713                                 "max_rx_queues(%d)\n", nb_rxq,
714                                 port->dev_info.max_rx_queues);
715                         return -1;
716                 }
717                 if (nb_txq > port->dev_info.max_tx_queues) {
718                         printf("Fail: nb_txq(%d) is greater than "
719                                 "max_tx_queues(%d)\n", nb_txq,
720                                 port->dev_info.max_tx_queues);
721                         return -1;
722                 }
723                 if (numa_support) {
724                         if (port_numa[pid] != NUMA_NO_CONFIG)
725                                 port->socket_id = port_numa[pid];
726                         else {
727                                 port->socket_id = rte_eth_dev_socket_id(pid);
728
729                                 /* if socket_id is invalid, set to 0 */
730                                 if (check_socket_id(port->socket_id) < 0)
731                                         port->socket_id = 0;
732                         }
733                 }
734                 else {
735                         if (socket_num == UMA_NO_CONFIG)
736                                 port->socket_id = 0;
737                         else
738                                 port->socket_id = socket_num;
739                 }
740         }
741
742         q = RTE_MAX(nb_rxq, nb_txq);
743         if (q == 0) {
744                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
745                 return -1;
746         }
747         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
748         if (nb_fwd_streams_new == nb_fwd_streams)
749                 return 0;
750         /* clear the old */
751         if (fwd_streams != NULL) {
752                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
753                         if (fwd_streams[sm_id] == NULL)
754                                 continue;
755                         rte_free(fwd_streams[sm_id]);
756                         fwd_streams[sm_id] = NULL;
757                 }
758                 rte_free(fwd_streams);
759                 fwd_streams = NULL;
760         }
761
762         /* init new */
763         nb_fwd_streams = nb_fwd_streams_new;
764         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
765                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
766         if (fwd_streams == NULL)
767                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
768                                                 "failed\n", nb_fwd_streams);
769
770         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
771                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
772                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
773                 if (fwd_streams[sm_id] == NULL)
774                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
775                                                                 " failed\n");
776         }
777
778         return 0;
779 }
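/*
 * init_fwd_streams() above provisions nb_ports * max(nb_rxq, nb_txq) stream
 * descriptors; whenever that count changes (for example after reconfiguring
 * the number of queues), the old array is freed and rebuilt from scratch.
 */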
780
781 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
782 static void
783 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
784 {
785         unsigned int total_burst;
786         unsigned int nb_burst;
787         unsigned int burst_stats[3];
788         uint16_t pktnb_stats[3];
789         uint16_t nb_pkt;
790         int burst_percent[3];
791
792         /*
793          * First compute the total number of packet bursts and the
794          * two highest numbers of bursts of the same number of packets.
795          */
796         total_burst = 0;
797         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
798         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
799         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
800                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
801                 if (nb_burst == 0)
802                         continue;
803                 total_burst += nb_burst;
804                 if (nb_burst > burst_stats[0]) {
805                         burst_stats[1] = burst_stats[0];
806                         pktnb_stats[1] = pktnb_stats[0];
807                         burst_stats[0] = nb_burst;
808                         pktnb_stats[0] = nb_pkt;
809                 }
810         }
811         if (total_burst == 0)
812                 return;
813         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
814         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
815                burst_percent[0], (int) pktnb_stats[0]);
816         if (burst_stats[0] == total_burst) {
817                 printf("]\n");
818                 return;
819         }
820         if (burst_stats[0] + burst_stats[1] == total_burst) {
821                 printf(" + %d%% of %d pkts]\n",
822                        100 - burst_percent[0], pktnb_stats[1]);
823                 return;
824         }
825         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
826         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
827         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
828                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
829                 return;
830         }
831         printf(" + %d%% of %d pkts + %d%% of others]\n",
832                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
833 }
834 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
835
836 static void
837 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
838 {
839         struct rte_port *port;
840         uint8_t i;
841
842         static const char *fwd_stats_border = "----------------------";
843
844         port = &ports[port_id];
845         printf("\n  %s Forward statistics for port %-2d %s\n",
846                fwd_stats_border, port_id, fwd_stats_border);
847
848         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
849                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
850                        "%-"PRIu64"\n",
851                        stats->ipackets, stats->imissed,
852                        (uint64_t) (stats->ipackets + stats->imissed));
853
854                 if (cur_fwd_eng == &csum_fwd_engine)
855                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
856                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
857                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
858                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
859                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
860                 }
861
862                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
863                        "%-"PRIu64"\n",
864                        stats->opackets, port->tx_dropped,
865                        (uint64_t) (stats->opackets + port->tx_dropped));
866         }
867         else {
868                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
869                        "%14"PRIu64"\n",
870                        stats->ipackets, stats->imissed,
871                        (uint64_t) (stats->ipackets + stats->imissed));
872
873                 if (cur_fwd_eng == &csum_fwd_engine)
874                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
875                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
876                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
877                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
878                         printf("  RX-nombufs:             %14"PRIu64"\n",
879                                stats->rx_nombuf);
880                 }
881
882                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
883                        "%14"PRIu64"\n",
884                        stats->opackets, port->tx_dropped,
885                        (uint64_t) (stats->opackets + port->tx_dropped));
886         }
887
888 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
889         if (port->rx_stream)
890                 pkt_burst_stats_display("RX",
891                         &port->rx_stream->rx_burst_stats);
892         if (port->tx_stream)
893                 pkt_burst_stats_display("TX",
894                         &port->tx_stream->tx_burst_stats);
895 #endif
896
897         if (port->rx_queue_stats_mapping_enabled) {
898                 printf("\n");
899                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
900                         printf("  Stats reg %2d RX-packets:%14"PRIu64
901                                "     RX-errors:%14"PRIu64
902                                "    RX-bytes:%14"PRIu64"\n",
903                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
904                 }
905                 printf("\n");
906         }
907         if (port->tx_queue_stats_mapping_enabled) {
908                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
909                         printf("  Stats reg %2d TX-packets:%14"PRIu64
910                                "                                 TX-bytes:%14"PRIu64"\n",
911                                i, stats->q_opackets[i], stats->q_obytes[i]);
912                 }
913         }
914
915         printf("  %s--------------------------------%s\n",
916                fwd_stats_border, fwd_stats_border);
917 }
918
919 static void
920 fwd_stream_stats_display(streamid_t stream_id)
921 {
922         struct fwd_stream *fs;
923         static const char *fwd_top_stats_border = "-------";
924
925         fs = fwd_streams[stream_id];
926         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
927             (fs->fwd_dropped == 0))
928                 return;
929         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
930                "TX Port=%2d/Queue=%2d %s\n",
931                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
932                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
933         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
934                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
935
936         /* if checksum mode */
937         if (cur_fwd_eng == &csum_fwd_engine) {
938                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
939                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
940         }
941
942 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
943         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
944         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
945 #endif
946 }
947
948 static void
949 flush_fwd_rx_queues(void)
950 {
951         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
952         portid_t  rxp;
953         portid_t port_id;
954         queueid_t rxq;
955         uint16_t  nb_rx;
956         uint16_t  i;
957         uint8_t   j;
958         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
959         uint64_t timer_period;
960
961         /* convert to number of cycles */
962         timer_period = rte_get_timer_hz(); /* 1 second timeout */
963
964         for (j = 0; j < 2; j++) {
965                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
966                         for (rxq = 0; rxq < nb_rxq; rxq++) {
967                                 port_id = fwd_ports_ids[rxp];
968                                 /*
969                                  * testpmd can get stuck in the do-while loop below
970                                  * if rte_eth_rx_burst() always returns nonzero
971                                  * packets, so a timer is used to exit this loop
972                                  * after a one second timeout.
973                                  */
974                                 prev_tsc = rte_rdtsc();
975                                 do {
976                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
977                                                 pkts_burst, MAX_PKT_BURST);
978                                         for (i = 0; i < nb_rx; i++)
979                                                 rte_pktmbuf_free(pkts_burst[i]);
980
981                                         cur_tsc = rte_rdtsc();
982                                         diff_tsc = cur_tsc - prev_tsc;
983                                         timer_tsc += diff_tsc;
984                                 } while ((nb_rx > 0) &&
985                                         (timer_tsc < timer_period));
986                                 timer_tsc = 0;
987                         }
988                 }
989                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
990         }
991 }
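/*
 * flush_fwd_rx_queues() above drains every Rx queue in two passes separated
 * by a 10 ms delay, and bounds each drain loop with a one second timer so a
 * continuously refilled queue cannot stall the flush forever.
 */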
992
993 static void
994 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
995 {
996         struct fwd_stream **fsm;
997         streamid_t nb_fs;
998         streamid_t sm_id;
999 #ifdef RTE_LIBRTE_BITRATE
1000         uint64_t tics_per_1sec;
1001         uint64_t tics_datum;
1002         uint64_t tics_current;
1003         uint8_t idx_port, cnt_ports;
1004
1005         cnt_ports = rte_eth_dev_count();
1006         tics_datum = rte_rdtsc();
1007         tics_per_1sec = rte_get_timer_hz();
1008 #endif
1009         fsm = &fwd_streams[fc->stream_idx];
1010         nb_fs = fc->stream_nb;
1011         do {
1012                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1013                         (*pkt_fwd)(fsm[sm_id]);
1014 #ifdef RTE_LIBRTE_BITRATE
1015                 if (bitrate_enabled != 0 &&
1016                                 bitrate_lcore_id == rte_lcore_id()) {
1017                         tics_current = rte_rdtsc();
1018                         if (tics_current - tics_datum >= tics_per_1sec) {
1019                                 /* Periodic bitrate calculation */
1020                                 for (idx_port = 0;
1021                                                 idx_port < cnt_ports;
1022                                                 idx_port++)
1023                                         rte_stats_bitrate_calc(bitrate_data,
1024                                                 idx_port);
1025                                 tics_datum = tics_current;
1026                         }
1027                 }
1028 #endif
1029 #ifdef RTE_LIBRTE_LATENCY_STATS
1030                 if (latencystats_enabled != 0 &&
1031                                 latencystats_lcore_id == rte_lcore_id())
1032                         rte_latencystats_update();
1033 #endif
1034
1035         } while (! fc->stopped);
1036 }
1037
1038 static int
1039 start_pkt_forward_on_core(void *fwd_arg)
1040 {
1041         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1042                              cur_fwd_config.fwd_eng->packet_fwd);
1043         return 0;
1044 }
1045
1046 /*
1047  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1048  * Used to start communication flows in network loopback test configurations.
1049  */
1050 static int
1051 run_one_txonly_burst_on_core(void *fwd_arg)
1052 {
1053         struct fwd_lcore *fwd_lc;
1054         struct fwd_lcore tmp_lcore;
1055
1056         fwd_lc = (struct fwd_lcore *) fwd_arg;
1057         tmp_lcore = *fwd_lc;
1058         tmp_lcore.stopped = 1;
1059         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1060         return 0;
1061 }
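/*
 * run_one_txonly_burst_on_core() above operates on a local copy of the lcore
 * context with 'stopped' already set, so the do-while loop in
 * run_pkt_fwd_on_lcore() performs exactly one pass, i.e. a single burst is
 * transmitted per stream.
 */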
1062
1063 /*
1064  * Launch packet forwarding:
1065  *     - Setup per-port forwarding context.
1066  *     - launch logical cores with their forwarding configuration.
1067  */
1068 static void
1069 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1070 {
1071         port_fwd_begin_t port_fwd_begin;
1072         unsigned int i;
1073         unsigned int lc_id;
1074         int diag;
1075
1076         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1077         if (port_fwd_begin != NULL) {
1078                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1079                         (*port_fwd_begin)(fwd_ports_ids[i]);
1080         }
1081         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1082                 lc_id = fwd_lcores_cpuids[i];
1083                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1084                         fwd_lcores[i]->stopped = 0;
1085                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1086                                                      fwd_lcores[i], lc_id);
1087                         if (diag != 0)
1088                                 printf("launch lcore %u failed - diag=%d\n",
1089                                        lc_id, diag);
1090                 }
1091         }
1092 }
1093
1094 /*
1095  * Launch packet forwarding configuration.
1096  */
1097 void
1098 start_packet_forwarding(int with_tx_first)
1099 {
1100         port_fwd_begin_t port_fwd_begin;
1101         port_fwd_end_t  port_fwd_end;
1102         struct rte_port *port;
1103         unsigned int i;
1104         portid_t   pt_id;
1105         streamid_t sm_id;
1106
1107         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1108                 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1109
1110         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1111                 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1112
1113         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1114                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1115                 (!nb_rxq || !nb_txq))
1116                 rte_exit(EXIT_FAILURE,
1117                         "Either rxq or txq are 0, cannot use %s fwd mode\n",
1118                         cur_fwd_eng->fwd_mode_name);
1119
1120         if (all_ports_started() == 0) {
1121                 printf("Not all ports were started\n");
1122                 return;
1123         }
1124         if (test_done == 0) {
1125                 printf("Packet forwarding already started\n");
1126                 return;
1127         }
1128
1129         if (init_fwd_streams() < 0) {
1130                 printf("Fail from init_fwd_streams()\n");
1131                 return;
1132         }
1133
1134         if(dcb_test) {
1135                 for (i = 0; i < nb_fwd_ports; i++) {
1136                         pt_id = fwd_ports_ids[i];
1137                         port = &ports[pt_id];
1138                         if (!port->dcb_flag) {
1139                                 printf("In DCB mode, all forwarding ports must "
1140                                        "be configured in this mode.\n");
1141                                 return;
1142                         }
1143                 }
1144                 if (nb_fwd_lcores == 1) {
1145                         printf("In DCB mode, the number of forwarding cores "
1146                                "should be larger than 1.\n");
1147                         return;
1148                 }
1149         }
1150         test_done = 0;
1151
1152         if(!no_flush_rx)
1153                 flush_fwd_rx_queues();
1154
1155         fwd_config_setup();
1156         pkt_fwd_config_display(&cur_fwd_config);
1157         rxtx_config_display();
1158
1159         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1160                 pt_id = fwd_ports_ids[i];
1161                 port = &ports[pt_id];
1162                 rte_eth_stats_get(pt_id, &port->stats);
1163                 port->tx_dropped = 0;
1164
1165                 map_port_queue_stats_mapping_registers(pt_id, port);
1166         }
1167         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1168                 fwd_streams[sm_id]->rx_packets = 0;
1169                 fwd_streams[sm_id]->tx_packets = 0;
1170                 fwd_streams[sm_id]->fwd_dropped = 0;
1171                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1172                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1173
1174 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1175                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1176                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1177                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1178                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1179 #endif
1180 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1181                 fwd_streams[sm_id]->core_cycles = 0;
1182 #endif
1183         }
1184         if (with_tx_first) {
1185                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1186                 if (port_fwd_begin != NULL) {
1187                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1188                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1189                 }
1190                 while (with_tx_first--) {
1191                         launch_packet_forwarding(
1192                                         run_one_txonly_burst_on_core);
1193                         rte_eal_mp_wait_lcore();
1194                 }
1195                 port_fwd_end = tx_only_engine.port_fwd_end;
1196                 if (port_fwd_end != NULL) {
1197                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1198                                 (*port_fwd_end)(fwd_ports_ids[i]);
1199                 }
1200         }
1201         launch_packet_forwarding(start_pkt_forward_on_core);
1202 }
1203
1204 void
1205 stop_packet_forwarding(void)
1206 {
1207         struct rte_eth_stats stats;
1208         struct rte_port *port;
1209         port_fwd_end_t  port_fwd_end;
1210         int i;
1211         portid_t   pt_id;
1212         streamid_t sm_id;
1213         lcoreid_t  lc_id;
1214         uint64_t total_recv;
1215         uint64_t total_xmit;
1216         uint64_t total_rx_dropped;
1217         uint64_t total_tx_dropped;
1218         uint64_t total_rx_nombuf;
1219         uint64_t tx_dropped;
1220         uint64_t rx_bad_ip_csum;
1221         uint64_t rx_bad_l4_csum;
1222 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1223         uint64_t fwd_cycles;
1224 #endif
1225
1226         static const char *acc_stats_border = "+++++++++++++++";
1227
1228         if (test_done) {
1229                 printf("Packet forwarding not started\n");
1230                 return;
1231         }
1232         printf("Telling cores to stop...");
1233         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1234                 fwd_lcores[lc_id]->stopped = 1;
1235         printf("\nWaiting for lcores to finish...\n");
1236         rte_eal_mp_wait_lcore();
1237         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1238         if (port_fwd_end != NULL) {
1239                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1240                         pt_id = fwd_ports_ids[i];
1241                         (*port_fwd_end)(pt_id);
1242                 }
1243         }
1244 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1245         fwd_cycles = 0;
1246 #endif
1247         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1248                 if (cur_fwd_config.nb_fwd_streams >
1249                     cur_fwd_config.nb_fwd_ports) {
1250                         fwd_stream_stats_display(sm_id);
1251                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1252                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1253                 } else {
1254                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1255                                 fwd_streams[sm_id];
1256                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1257                                 fwd_streams[sm_id];
1258                 }
1259                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1260                 tx_dropped = (uint64_t) (tx_dropped +
1261                                          fwd_streams[sm_id]->fwd_dropped);
1262                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1263
1264                 rx_bad_ip_csum =
1265                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1266                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1267                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1268                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1269                                                         rx_bad_ip_csum;
1270
1271                 rx_bad_l4_csum =
1272                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1273                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1274                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1275                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1276                                                         rx_bad_l4_csum;
1277
1278 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1279                 fwd_cycles = (uint64_t) (fwd_cycles +
1280                                          fwd_streams[sm_id]->core_cycles);
1281 #endif
1282         }
1283         total_recv = 0;
1284         total_xmit = 0;
1285         total_rx_dropped = 0;
1286         total_tx_dropped = 0;
1287         total_rx_nombuf  = 0;
1288         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1289                 pt_id = fwd_ports_ids[i];
1290
1291                 port = &ports[pt_id];
1292                 rte_eth_stats_get(pt_id, &stats);
1293                 stats.ipackets -= port->stats.ipackets;
1294                 port->stats.ipackets = 0;
1295                 stats.opackets -= port->stats.opackets;
1296                 port->stats.opackets = 0;
1297                 stats.ibytes   -= port->stats.ibytes;
1298                 port->stats.ibytes = 0;
1299                 stats.obytes   -= port->stats.obytes;
1300                 port->stats.obytes = 0;
1301                 stats.imissed  -= port->stats.imissed;
1302                 port->stats.imissed = 0;
1303                 stats.oerrors  -= port->stats.oerrors;
1304                 port->stats.oerrors = 0;
1305                 stats.rx_nombuf -= port->stats.rx_nombuf;
1306                 port->stats.rx_nombuf = 0;
1307
1308                 total_recv += stats.ipackets;
1309                 total_xmit += stats.opackets;
1310                 total_rx_dropped += stats.imissed;
1311                 total_tx_dropped += port->tx_dropped;
1312                 total_rx_nombuf  += stats.rx_nombuf;
1313
1314                 fwd_port_stats_display(pt_id, &stats);
1315         }
1316
1317         printf("\n  %s Accumulated forward statistics for all ports"
1318                "%s\n",
1319                acc_stats_border, acc_stats_border);
1320         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1321                "%-"PRIu64"\n"
1322                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1323                "%-"PRIu64"\n",
1324                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1325                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1326         if (total_rx_nombuf > 0)
1327                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1328         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1329                "%s\n",
1330                acc_stats_border, acc_stats_border);
1331 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1332         if (total_recv > 0)
1333                 printf("\n  CPU cycles/packet=%u (total cycles="
1334                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1335                        (unsigned int)(fwd_cycles / total_recv),
1336                        fwd_cycles, total_recv);
1337 #endif
1338         printf("\nDone.\n");
1339         test_done = 1;
1340 }
1341
1342 void
1343 dev_set_link_up(portid_t pid)
1344 {
1345         if (rte_eth_dev_set_link_up(pid) < 0)
1346                 printf("\nSet link up fail.\n");
1347 }
1348
1349 void
1350 dev_set_link_down(portid_t pid)
1351 {
1352         if (rte_eth_dev_set_link_down(pid) < 0)
1353                 printf("\nSet link down fail.\n");
1354 }
1355
1356 static int
1357 all_ports_started(void)
1358 {
1359         portid_t pi;
1360         struct rte_port *port;
1361
1362         RTE_ETH_FOREACH_DEV(pi) {
1363                 port = &ports[pi];
1364                 /* Check if there is a port which is not started */
1365                 if ((port->port_status != RTE_PORT_STARTED) &&
1366                         (port->slave_flag == 0))
1367                         return 0;
1368         }
1369
1370         /* All ports are started */
1371         return 1;
1372 }
1373
1374 int
1375 port_is_stopped(portid_t port_id)
1376 {
1377         struct rte_port *port = &ports[port_id];
1378
1379         if ((port->port_status != RTE_PORT_STOPPED) &&
1380             (port->slave_flag == 0))
1381                 return 0;
1382         return 1;
1383 }
1384
1385 int
1386 all_ports_stopped(void)
1387 {
1388         portid_t pi;
1389
1390         RTE_ETH_FOREACH_DEV(pi) {
1391                 if (!port_is_stopped(pi))
1392                         return 0;
1393         }
1394
1395         return 1;
1396 }
1397
1398 int
1399 port_is_started(portid_t port_id)
1400 {
1401         if (port_id_is_invalid(port_id, ENABLED_WARN))
1402                 return 0;
1403
1404         if (ports[port_id].port_status != RTE_PORT_STARTED)
1405                 return 0;
1406
1407         return 1;
1408 }
1409
1410 static int
1411 port_is_closed(portid_t port_id)
1412 {
1413         if (port_id_is_invalid(port_id, ENABLED_WARN))
1414                 return 0;
1415
1416         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1417                 return 0;
1418
1419         return 1;
1420 }
1421
1422 int
1423 start_port(portid_t pid)
1424 {
1425         int diag, need_check_link_status = -1;
1426         portid_t pi;
1427         queueid_t qi;
1428         struct rte_port *port;
1429         struct ether_addr mac_addr;
1430         enum rte_eth_event_type event_type;
1431
1432         if (port_id_is_invalid(pid, ENABLED_WARN))
1433                 return 0;
1434
1435         if(dcb_config)
1436                 dcb_test = 1;
1437         RTE_ETH_FOREACH_DEV(pi) {
1438                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1439                         continue;
1440
1441                 need_check_link_status = 0;
1442                 port = &ports[pi];
1443                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1444                                                  RTE_PORT_HANDLING) == 0) {
1445                         printf("Port %d is now not stopped\n", pi);
1446                         continue;
1447                 }
1448
1449                 if (port->need_reconfig > 0) {
1450                         port->need_reconfig = 0;
1451
1452                         if (flow_isolate_all) {
1453                                 int ret = port_flow_isolate(pi, 1);
1454                                 if (ret) {
1455                                         printf("Failed to apply isolated"
1456                                                " mode on port %d\n", pi);
1457                                         return -1;
1458                                 }
1459                         }
1460
1461                         printf("Configuring Port %d (socket %u)\n", pi,
1462                                         port->socket_id);
1463                         /* configure port */
1464                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1465                                                 &(port->dev_conf));
1466                         if (diag != 0) {
1467                                 if (rte_atomic16_cmpset(&(port->port_status),
1468                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1469                                         printf("Port %d cannot be set back "
1470                                                         "to stopped\n", pi);
1471                                 printf("Failed to configure port %d\n", pi);
1472                                 /* try to reconfigure port next time */
1473                                 port->need_reconfig = 1;
1474                                 return -1;
1475                         }
1476                 }
1477                 if (port->need_reconfig_queues > 0) {
1478                         port->need_reconfig_queues = 0;
1479                         port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
1480                         /* Apply Tx offloads configuration */
1481                         port->tx_conf.offloads = port->dev_conf.txmode.offloads;
1482                         /* setup tx queues */
1483                         for (qi = 0; qi < nb_txq; qi++) {
1484                                 if ((numa_support) &&
1485                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1486                                         diag = rte_eth_tx_queue_setup(pi, qi,
1487                                                 nb_txd, txring_numa[pi],
1488                                                 &(port->tx_conf));
1489                                 else
1490                                         diag = rte_eth_tx_queue_setup(pi, qi,
1491                                                 nb_txd, port->socket_id,
1492                                                 &(port->tx_conf));
1493
1494                                 if (diag == 0)
1495                                         continue;
1496
1497                                 /* Failed to set up a Tx queue, return */
1498                                 if (rte_atomic16_cmpset(&(port->port_status),
1499                                                         RTE_PORT_HANDLING,
1500                                                         RTE_PORT_STOPPED) == 0)
1501                                         printf("Port %d cannot be set back "
1502                                                         "to stopped\n", pi);
1503                                 printf("Failed to configure port %d tx queues\n", pi);
1504                                 /* try to reconfigure queues next time */
1505                                 port->need_reconfig_queues = 1;
1506                                 return -1;
1507                         }
1508                         /* Apply Rx offloads configuration */
1509                         port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
1510                         /* setup rx queues */
1511                         for (qi = 0; qi < nb_rxq; qi++) {
1512                                 if ((numa_support) &&
1513                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1514                                         struct rte_mempool * mp =
1515                                                 mbuf_pool_find(rxring_numa[pi]);
1516                                         if (mp == NULL) {
1517                                                 printf("Failed to setup RX queue: "
1518                                                         "no mempool allocated"
1519                                                         " on socket %d\n",
1520                                                         rxring_numa[pi]);
1521                                                 return -1;
1522                                         }
1523
1524                                         diag = rte_eth_rx_queue_setup(pi, qi,
1525                                              nb_rxd, rxring_numa[pi],
1526                                              &(port->rx_conf), mp);
1527                                 } else {
1528                                         struct rte_mempool *mp =
1529                                                 mbuf_pool_find(port->socket_id);
1530                                         if (mp == NULL) {
1531                                                 printf("Failed to setup RX queue: "
1532                                                         "no mempool allocated"
1533                                                         " on socket %d\n",
1534                                                         port->socket_id);
1535                                                 return -1;
1536                                         }
1537                                         diag = rte_eth_rx_queue_setup(pi, qi,
1538                                              nb_rxd, port->socket_id,
1539                                              &(port->rx_conf), mp);
1540                                 }
1541                                 if (diag == 0)
1542                                         continue;
1543
1544                                 /* Failed to set up an Rx queue, return */
1545                                 if (rte_atomic16_cmpset(&(port->port_status),
1546                                                         RTE_PORT_HANDLING,
1547                                                         RTE_PORT_STOPPED) == 0)
1548                                         printf("Port %d cannot be set back "
1549                                                         "to stopped\n", pi);
1550                                 printf("Failed to configure port %d rx queues\n", pi);
1551                                 /* try to reconfigure queues next time */
1552                                 port->need_reconfig_queues = 1;
1553                                 return -1;
1554                         }
1555                 }
1556
1557                 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1558                      event_type < RTE_ETH_EVENT_MAX;
1559                      event_type++) {
1560                         diag = rte_eth_dev_callback_register(pi,
1561                                                         event_type,
1562                                                         eth_event_callback,
1563                                                         NULL);
1564                         if (diag) {
1565                                 printf("Failed to setup event callback for event %d\n",
1566                                         event_type);
1567                                 return -1;
1568                         }
1569                 }
1570
1571                 /* start port */
1572                 if (rte_eth_dev_start(pi) < 0) {
1573                         printf("Failed to start port %d\n", pi);
1574
1575                         /* Failed to start the port, set it back to stopped */
1576                         if (rte_atomic16_cmpset(&(port->port_status),
1577                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1578                                 printf("Port %d cannot be set back to "
1579                                                         "stopped\n", pi);
1580                         continue;
1581                 }
1582
1583                 if (rte_atomic16_cmpset(&(port->port_status),
1584                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1585                         printf("Port %d cannot be set to started\n", pi);
1586
1587                 rte_eth_macaddr_get(pi, &mac_addr);
1588                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1589                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1590                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1591                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1592
1593                 /* at least one port started, need checking link status */
1594                 need_check_link_status = 1;
1595         }
1596
1597         if (need_check_link_status == 1 && !no_link_check)
1598                 check_all_ports_link_status(RTE_PORT_ALL);
1599         else if (need_check_link_status == 0)
1600                 printf("Please stop the ports first\n");
1601
1602         printf("Done\n");
1603         return 0;
1604 }
1605
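/*
 * Stop the given port, or every probed port when pid is RTE_PORT_ALL.
 * Ports that are still forwarding or that act as bonding slaves are
 * skipped.
 */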
1606 void
1607 stop_port(portid_t pid)
1608 {
1609         portid_t pi;
1610         struct rte_port *port;
1611         int need_check_link_status = 0;
1612
1613         if (dcb_test) {
1614                 dcb_test = 0;
1615                 dcb_config = 0;
1616         }
1617
1618         if (port_id_is_invalid(pid, ENABLED_WARN))
1619                 return;
1620
1621         printf("Stopping ports...\n");
1622
1623         RTE_ETH_FOREACH_DEV(pi) {
1624                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1625                         continue;
1626
1627                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1628                         printf("Please remove port %d from forwarding configuration.\n", pi);
1629                         continue;
1630                 }
1631
1632                 if (port_is_bonding_slave(pi)) {
1633                         printf("Please remove port %d from bonded device.\n", pi);
1634                         continue;
1635                 }
1636
1637                 port = &ports[pi];
1638                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1639                                                 RTE_PORT_HANDLING) == 0)
1640                         continue;
1641
1642                 rte_eth_dev_stop(pi);
1643
1644                 if (rte_atomic16_cmpset(&(port->port_status),
1645                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1646                         printf("Port %d cannot be set to stopped\n", pi);
1647                 need_check_link_status = 1;
1648         }
1649         if (need_check_link_status && !no_link_check)
1650                 check_all_ports_link_status(RTE_PORT_ALL);
1651
1652         printf("Done\n");
1653 }
1654
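/*
 * Close the given port, or every probed port when pid is RTE_PORT_ALL,
 * flushing any remaining flow rules first. Ports that are still
 * forwarding or that act as bonding slaves are skipped.
 */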
1655 void
1656 close_port(portid_t pid)
1657 {
1658         portid_t pi;
1659         struct rte_port *port;
1660
1661         if (port_id_is_invalid(pid, ENABLED_WARN))
1662                 return;
1663
1664         printf("Closing ports...\n");
1665
1666         RTE_ETH_FOREACH_DEV(pi) {
1667                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1668                         continue;
1669
1670                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1671                         printf("Please remove port %d from forwarding configuration.\n", pi);
1672                         continue;
1673                 }
1674
1675                 if (port_is_bonding_slave(pi)) {
1676                         printf("Please remove port %d from bonded device.\n", pi);
1677                         continue;
1678                 }
1679
1680                 port = &ports[pi];
1681                 if (rte_atomic16_cmpset(&(port->port_status),
1682                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1683                         printf("Port %d is already closed\n", pi);
1684                         continue;
1685                 }
1686
1687                 if (rte_atomic16_cmpset(&(port->port_status),
1688                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1689                         printf("Port %d is not stopped\n", pi);
1690                         continue;
1691                 }
1692
1693                 if (port->flow_list)
1694                         port_flow_flush(pi);
1695                 rte_eth_dev_close(pi);
1696
1697                 if (rte_atomic16_cmpset(&(port->port_status),
1698                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1699                         printf("Port %d cannot be set to closed\n", pi);
1700         }
1701
1702         printf("Done\n");
1703 }
1704
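/*
 * Reset the given port, or every probed port when pid is RTE_PORT_ALL,
 * and mark it for full reconfiguration on the next start.
 */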
1705 void
1706 reset_port(portid_t pid)
1707 {
1708         int diag;
1709         portid_t pi;
1710         struct rte_port *port;
1711
1712         if (port_id_is_invalid(pid, ENABLED_WARN))
1713                 return;
1714
1715         printf("Resetting ports...\n");
1716
1717         RTE_ETH_FOREACH_DEV(pi) {
1718                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1719                         continue;
1720
1721                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1722                         printf("Please remove port %d from forwarding "
1723                                "configuration.\n", pi);
1724                         continue;
1725                 }
1726
1727                 if (port_is_bonding_slave(pi)) {
1728                         printf("Please remove port %d from bonded device.\n",
1729                                pi);
1730                         continue;
1731                 }
1732
1733                 diag = rte_eth_dev_reset(pi);
1734                 if (diag == 0) {
1735                         port = &ports[pi];
1736                         port->need_reconfig = 1;
1737                         port->need_reconfig_queues = 1;
1738                 } else {
1739                         printf("Failed to reset port %d. diag=%d\n", pi, diag);
1740                 }
1741         }
1742
1743         printf("Done\n");
1744 }
1745
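/*
 * Attach the device named by "identifier" (a PCI address or virtual
 * device string), reconfigure it on its NUMA socket, enable promiscuous
 * mode and leave the new port in the stopped state. Typically driven
 * from the CLI "port attach" command.
 */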
1746 void
1747 attach_port(char *identifier)
1748 {
1749         portid_t pi = 0;
1750         unsigned int socket_id;
1751
1752         printf("Attaching a new port...\n");
1753
1754         if (identifier == NULL) {
1755                 printf("Invalid device identifier specified\n");
1756                 return;
1757         }
1758
1759         if (rte_eth_dev_attach(identifier, &pi))
1760                 return;
1761
1762         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1763         /* if socket_id is invalid, set to 0 */
1764         if (check_socket_id(socket_id) < 0)
1765                 socket_id = 0;
1766         reconfig(pi, socket_id);
1767         rte_eth_promiscuous_enable(pi);
1768
1769         nb_ports = rte_eth_dev_count();
1770
1771         ports[pi].port_status = RTE_PORT_STOPPED;
1772
1773         printf("Port %d is attached. Total number of ports is now %d\n",
1774                         pi, nb_ports);
1774         printf("Done\n");
1775 }
1776
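/*
 * Detach a port that has already been closed, flushing any remaining
 * flow rules before calling rte_eth_dev_detach().
 */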
1777 void
1778 detach_port(portid_t port_id)
1779 {
1780         char name[RTE_ETH_NAME_MAX_LEN];
1781
1782         printf("Detaching a port...\n");
1783
1784         if (!port_is_closed(port_id)) {
1785                 printf("Please close port first\n");
1786                 return;
1787         }
1788
1789         if (ports[port_id].flow_list)
1790                 port_flow_flush(port_id);
1791
1792         if (rte_eth_dev_detach(port_id, name)) {
1793                 TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1794                 return;
1795         }
1796
1797         nb_ports = rte_eth_dev_count();
1798
1799         printf("Port '%s' is detached. Total number of ports is now %d\n",
1800                         name, nb_ports);
1801         printf("Done\n");
1802         return;
1803 }
1804
1805 void
1806 pmd_test_exit(void)
1807 {
1808         portid_t pt_id;
1809
1810         if (test_done == 0)
1811                 stop_packet_forwarding();
1812
1813         if (ports != NULL) {
1814                 no_link_check = 1;
1815                 RTE_ETH_FOREACH_DEV(pt_id) {
1816                         printf("\nShutting down port %d...\n", pt_id);
1817                         fflush(stdout);
1818                         stop_port(pt_id);
1819                         close_port(pt_id);
1820                 }
1821         }
1822         printf("\nBye...\n");
1823 }
1824
1825 typedef void (*cmd_func_t)(void);
1826 struct pmd_test_command {
1827         const char *cmd_name;
1828         cmd_func_t cmd_func;
1829 };
1830
1831 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1832
1833 /* Check the link status of all ports for up to 9 s and print the final status */
1834 static void
1835 check_all_ports_link_status(uint32_t port_mask)
1836 {
1837 #define CHECK_INTERVAL 100 /* 100ms */
1838 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1839         portid_t portid;
1840         uint8_t count, all_ports_up, print_flag = 0;
1841         struct rte_eth_link link;
1842
1843         printf("Checking link statuses...\n");
1844         fflush(stdout);
1845         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1846                 all_ports_up = 1;
1847                 RTE_ETH_FOREACH_DEV(portid) {
1848                         if ((port_mask & (1 << portid)) == 0)
1849                                 continue;
1850                         memset(&link, 0, sizeof(link));
1851                         rte_eth_link_get_nowait(portid, &link);
1852                         /* print link status if flag set */
1853                         if (print_flag == 1) {
1854                                 if (link.link_status)
1855                                         printf(
1856                                         "Port %d Link Up. speed %u Mbps - %s\n",
1857                                         portid, link.link_speed,
1858                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1859                                         ("full-duplex") : ("half-duplex"));
1860                                 else
1861                                         printf("Port %d Link Down\n", portid);
1862                                 continue;
1863                         }
1864                         /* clear all_ports_up flag if any link down */
1865                         if (link.link_status == ETH_LINK_DOWN) {
1866                                 all_ports_up = 0;
1867                                 break;
1868                         }
1869                 }
1870                 /* after finally printing all link status, get out */
1871                 if (print_flag == 1)
1872                         break;
1873
1874                 if (all_ports_up == 0) {
1875                         fflush(stdout);
1876                         rte_delay_ms(CHECK_INTERVAL);
1877                 }
1878
1879                 /* set the print_flag if all ports up or timeout */
1880                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1881                         print_flag = 1;
1882                 }
1883
1884                 if (lsc_interrupt)
1885                         break;
1886         }
1887 }
1888
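/*
 * Deferred handler for an RTE_ETH_EVENT_INTR_RMV event: stop and close
 * the port, then detach the underlying device.
 */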
1889 static void
1890 rmv_event_callback(void *arg)
1891 {
1892         struct rte_eth_dev *dev;
1893         portid_t port_id = (intptr_t)arg;
1894
1895         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1896         dev = &rte_eth_devices[port_id];
1897
1898         stop_port(port_id);
1899         close_port(port_id);
1900         printf("removing device %s\n", dev->device->name);
1901         if (rte_eal_dev_detach(dev->device))
1902                 TESTPMD_LOG(ERR, "Failed to detach device %s\n",
1903                         dev->device->name);
1904 }
1905
1906 /* This function is used by the interrupt thread */
1907 static int
1908 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1909                   void *ret_param)
1910 {
1911         static const char * const event_desc[] = {
1912                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1913                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1914                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1915                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1916                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1917                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1918                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1919                 [RTE_ETH_EVENT_MAX] = NULL,
1920         };
1921
1922         RTE_SET_USED(param);
1923         RTE_SET_USED(ret_param);
1924
1925         if (type >= RTE_ETH_EVENT_MAX) {
1926                 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1927                         port_id, __func__, type);
1928                 fflush(stderr);
1929         } else if (event_print_mask & (UINT32_C(1) << type)) {
1930                 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1931                         event_desc[type]);
1932                 fflush(stdout);
1933         }
1934
1935         switch (type) {
1936         case RTE_ETH_EVENT_INTR_RMV:
1937                 if (rte_eal_alarm_set(100000,
1938                                 rmv_event_callback, (void *)(intptr_t)port_id))
1939                         fprintf(stderr, "Could not set up deferred device removal\n");
1940                 break;
1941         default:
1942                 break;
1943         }
1944         return 0;
1945 }
1946
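/*
 * Program the Tx queue to stats counter mappings requested on the
 * command line for this port. Returns the ethdev error code on failure,
 * 0 otherwise.
 */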
1947 static int
1948 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1949 {
1950         uint16_t i;
1951         int diag;
1952         uint8_t mapping_found = 0;
1953
1954         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1955                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1956                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1957                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1958                                         tx_queue_stats_mappings[i].queue_id,
1959                                         tx_queue_stats_mappings[i].stats_counter_id);
1960                         if (diag != 0)
1961                                 return diag;
1962                         mapping_found = 1;
1963                 }
1964         }
1965         if (mapping_found)
1966                 port->tx_queue_stats_mapping_enabled = 1;
1967         return 0;
1968 }
1969
1970 static int
1971 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1972 {
1973         uint16_t i;
1974         int diag;
1975         uint8_t mapping_found = 0;
1976
1977         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1978                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1979                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1980                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1981                                         rx_queue_stats_mappings[i].queue_id,
1982                                         rx_queue_stats_mappings[i].stats_counter_id);
1983                         if (diag != 0)
1984                                 return diag;
1985                         mapping_found = 1;
1986                 }
1987         }
1988         if (mapping_found)
1989                 port->rx_queue_stats_mapping_enabled = 1;
1990         return 0;
1991 }
1992
1993 static void
1994 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
1995 {
1996         int diag = 0;
1997
1998         diag = set_tx_queue_stats_mapping_registers(pi, port);
1999         if (diag != 0) {
2000                 if (diag == -ENOTSUP) {
2001                         port->tx_queue_stats_mapping_enabled = 0;
2002                         printf("TX queue stats mapping not supported for port id=%d\n", pi);
2003                 }
2004                 else
2005                         rte_exit(EXIT_FAILURE,
2006                                         "set_tx_queue_stats_mapping_registers "
2007                                         "failed for port id=%d diag=%d\n",
2008                                         pi, diag);
2009         }
2010
2011         diag = set_rx_queue_stats_mapping_registers(pi, port);
2012         if (diag != 0) {
2013                 if (diag == -ENOTSUP) {
2014                         port->rx_queue_stats_mapping_enabled = 0;
2015                         printf("RX queue stats mapping not supported for port id=%d\n", pi);
2016                 }
2017                 else
2018                         rte_exit(EXIT_FAILURE,
2019                                         "set_rx_queue_stats_mapping_registers "
2020                                         "failed for port id=%d diag=%d\n",
2021                                         pi, diag);
2022         }
2023 }
2024
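/*
 * Start from the PMD default Rx/Tx queue configuration reported in
 * dev_info and override only the parameters that were explicitly set
 * on the command line.
 */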
2025 static void
2026 rxtx_port_config(struct rte_port *port)
2027 {
2028         port->rx_conf = port->dev_info.default_rxconf;
2029         port->tx_conf = port->dev_info.default_txconf;
2030
2031         /* Check if any RX/TX parameters have been passed */
2032         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2033                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2034
2035         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2036                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2037
2038         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2039                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2040
2041         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2042                 port->rx_conf.rx_free_thresh = rx_free_thresh;
2043
2044         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2045                 port->rx_conf.rx_drop_en = rx_drop_en;
2046
2047         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2048                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2049
2050         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2051                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2052
2053         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2054                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2055
2056         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2057                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2058
2059         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2060                 port->tx_conf.tx_free_thresh = tx_free_thresh;
2061 }
2062
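/*
 * Apply the default per-port configuration: flow director settings,
 * RSS when more than one Rx queue is used, queue stats mappings, and
 * LSC/RMV interrupts when the device supports them.
 */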
2063 void
2064 init_port_config(void)
2065 {
2066         portid_t pid;
2067         struct rte_port *port;
2068
2069         RTE_ETH_FOREACH_DEV(pid) {
2070                 port = &ports[pid];
2071                 port->dev_conf.fdir_conf = fdir_conf;
2072                 if (nb_rxq > 1) {
2073                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2074                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2075                 } else {
2076                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2077                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2078                 }
2079
2080                 if (port->dcb_flag == 0) {
2081                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2082                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2083                         else
2084                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2085                 }
2086
2087                 rxtx_port_config(port);
2088
2089                 rte_eth_macaddr_get(pid, &port->eth_addr);
2090
2091                 map_port_queue_stats_mapping_registers(pid, port);
2092 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2093                 rte_pmd_ixgbe_bypass_init(pid);
2094 #endif
2095
2096                 if (lsc_interrupt &&
2097                     (rte_eth_devices[pid].data->dev_flags &
2098                      RTE_ETH_DEV_INTR_LSC))
2099                         port->dev_conf.intr_conf.lsc = 1;
2100                 if (rmv_interrupt &&
2101                     (rte_eth_devices[pid].data->dev_flags &
2102                      RTE_ETH_DEV_INTR_RMV))
2103                         port->dev_conf.intr_conf.rmv = 1;
2104
2105 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2106                 /* Detect softnic port */
2107                 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2108                         port->softnic_enable = 1;
2109                         memset(&port->softport, 0, sizeof(struct softnic_port));
2110
2111                         if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2112                                 port->softport.tm_flag = 1;
2113                 }
2114 #endif
2115         }
2116 }
2117
2118 void set_port_slave_flag(portid_t slave_pid)
2119 {
2120         struct rte_port *port;
2121
2122         port = &ports[slave_pid];
2123         port->slave_flag = 1;
2124 }
2125
2126 void clear_port_slave_flag(portid_t slave_pid)
2127 {
2128         struct rte_port *port;
2129
2130         port = &ports[slave_pid];
2131         port->slave_flag = 0;
2132 }
2133
2134 uint8_t port_is_bonding_slave(portid_t slave_pid)
2135 {
2136         struct rte_port *port;
2137
2138         port = &ports[slave_pid];
2139         return port->slave_flag;
2140 }
2141
2142 const uint16_t vlan_tags[] = {
2143                 0,  1,  2,  3,  4,  5,  6,  7,
2144                 8,  9, 10, 11,  12, 13, 14, 15,
2145                 16, 17, 18, 19, 20, 21, 22, 23,
2146                 24, 25, 26, 27, 28, 29, 30, 31
2147 };
2148
2149 static int
2150 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2151                  enum dcb_mode_enable dcb_mode,
2152                  enum rte_eth_nb_tcs num_tcs,
2153                  uint8_t pfc_en)
2154 {
2155         uint8_t i;
2156
2157         /*
2158          * Builds up the correct configuration for dcb+vt based on the vlan tags array
2159          * given above, and the number of traffic classes available for use.
2160          */
2161         if (dcb_mode == DCB_VT_ENABLED) {
2162                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2163                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2164                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2165                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2166
2167                 /* VMDQ+DCB RX and TX configurations */
2168                 vmdq_rx_conf->enable_default_pool = 0;
2169                 vmdq_rx_conf->default_pool = 0;
2170                 vmdq_rx_conf->nb_queue_pools =
2171                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2172                 vmdq_tx_conf->nb_queue_pools =
2173                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2174
2175                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2176                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2177                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2178                         vmdq_rx_conf->pool_map[i].pools =
2179                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
2180                 }
2181                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2182                         vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2183                         vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2184                 }
2185
2186                 /* set DCB mode of RX and TX of multiple queues */
2187                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2188                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2189         } else {
2190                 struct rte_eth_dcb_rx_conf *rx_conf =
2191                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
2192                 struct rte_eth_dcb_tx_conf *tx_conf =
2193                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
2194
2195                 rx_conf->nb_tcs = num_tcs;
2196                 tx_conf->nb_tcs = num_tcs;
2197
2198                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2199                         rx_conf->dcb_tc[i] = i % num_tcs;
2200                         tx_conf->dcb_tc[i] = i % num_tcs;
2201                 }
2202                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2203                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2204                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2205         }
2206
2207         if (pfc_en)
2208                 eth_conf->dcb_capability_en =
2209                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2210         else
2211                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2212
2213         return 0;
2214 }
2215
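/*
 * Build a DCB (optionally with VT) configuration for the port, adjust
 * nb_rxq/nb_txq to what the device exposes, enable VLAN filtering for
 * the test VLAN tags and mark the port as DCB-enabled.
 */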
2216 int
2217 init_port_dcb_config(portid_t pid,
2218                      enum dcb_mode_enable dcb_mode,
2219                      enum rte_eth_nb_tcs num_tcs,
2220                      uint8_t pfc_en)
2221 {
2222         struct rte_eth_conf port_conf;
2223         struct rte_port *rte_port;
2224         int retval;
2225         uint16_t i;
2226
2227         rte_port = &ports[pid];
2228
2229         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2230         /* Enter DCB configuration status */
2231         dcb_config = 1;
2232
2233         /* Set configuration of DCB in VT mode and DCB in non-VT mode */
2234         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2235         if (retval < 0)
2236                 return retval;
2237         port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2238
2239         /**
2240          * Write the configuration into the device.
2241          * Set the numbers of RX & TX queues to 0, so
2242          * the RX & TX queues will not be set up.
2243          */
2244         rte_eth_dev_configure(pid, 0, 0, &port_conf);
2245
2246         rte_eth_dev_info_get(pid, &rte_port->dev_info);
2247
2248         /* If dev_info.vmdq_pool_base is greater than 0,
2249          * the queue id of vmdq pools is started after pf queues.
2250          */
2251         if (dcb_mode == DCB_VT_ENABLED &&
2252             rte_port->dev_info.vmdq_pool_base > 0) {
2253                 printf("VMDQ_DCB multi-queue mode is nonsensical"
2254                         " for port %d.\n", pid);
2255                 return -1;
2256         }
2257
2258         /* Assume the ports in testpmd have the same dcb capability
2259          * and have the same number of rxq and txq in DCB mode
2260          */
2261         if (dcb_mode == DCB_VT_ENABLED) {
2262                 if (rte_port->dev_info.max_vfs > 0) {
2263                         nb_rxq = rte_port->dev_info.nb_rx_queues;
2264                         nb_txq = rte_port->dev_info.nb_tx_queues;
2265                 } else {
2266                         nb_rxq = rte_port->dev_info.max_rx_queues;
2267                         nb_txq = rte_port->dev_info.max_tx_queues;
2268                 }
2269         } else {
2270                 /* If VT is disabled, use all PF queues */
2271                 if (rte_port->dev_info.vmdq_pool_base == 0) {
2272                         nb_rxq = rte_port->dev_info.max_rx_queues;
2273                         nb_txq = rte_port->dev_info.max_tx_queues;
2274                 } else {
2275                         nb_rxq = (queueid_t)num_tcs;
2276                         nb_txq = (queueid_t)num_tcs;
2277
2278                 }
2279         }
2280         rx_free_thresh = 64;
2281
2282         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2283
2284         rxtx_port_config(rte_port);
2285         /* VLAN filter */
2286         rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2287         for (i = 0; i < RTE_DIM(vlan_tags); i++)
2288                 rx_vft_set(pid, vlan_tags[i], 1);
2289
2290         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2291         map_port_queue_stats_mapping_registers(pid, rte_port);
2292
2293         rte_port->dcb_flag = 1;
2294
2295         return 0;
2296 }
2297
2298 static void
2299 init_port(void)
2300 {
2301         /* Configuration of Ethernet ports. */
2302         ports = rte_zmalloc("testpmd: ports",
2303                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2304                             RTE_CACHE_LINE_SIZE);
2305         if (ports == NULL) {
2306                 rte_exit(EXIT_FAILURE,
2307                                 "rte_zmalloc(%d struct rte_port) failed\n",
2308                                 RTE_MAX_ETHPORTS);
2309         }
2310 }
2311
2312 static void
2313 force_quit(void)
2314 {
2315         pmd_test_exit();
2316         prompt_exit();
2317 }
2318
2319 static void
2320 print_stats(void)
2321 {
2322         uint8_t i;
2323         const char clr[] = { 27, '[', '2', 'J', '\0' };
2324         const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2325
2326         /* Clear screen and move to top left */
2327         printf("%s%s", clr, top_left);
2328
2329         printf("\nPort statistics ====================================");
2330         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2331                 nic_stats_display(fwd_ports_ids[i]);
2332 }
2333
2334 static void
2335 signal_handler(int signum)
2336 {
2337         if (signum == SIGINT || signum == SIGTERM) {
2338                 printf("\nSignal %d received, preparing to exit...\n",
2339                                 signum);
2340 #ifdef RTE_LIBRTE_PDUMP
2341                 /* uninitialize packet capture framework */
2342                 rte_pdump_uninit();
2343 #endif
2344 #ifdef RTE_LIBRTE_LATENCY_STATS
2345                 rte_latencystats_uninit();
2346 #endif
2347                 force_quit();
2348                 /* Set flag to indicate forced termination. */
2349                 f_quit = 1;
2350                 /* exit with the expected status */
2351                 signal(signum, SIG_DFL);
2352                 kill(getpid(), signum);
2353         }
2354 }
2355
2356 int
2357 main(int argc, char** argv)
2358 {
2359         int  diag;
2360         portid_t port_id;
2361
2362         signal(SIGINT, signal_handler);
2363         signal(SIGTERM, signal_handler);
2364
2365         diag = rte_eal_init(argc, argv);
2366         if (diag < 0)
2367                 rte_panic("Cannot init EAL\n");
2368
2369         testpmd_logtype = rte_log_register("testpmd");
2370         if (testpmd_logtype < 0)
2371                 rte_panic("Cannot register log type");
2372         rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2373
2374         if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2375                 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2376                         strerror(errno));
2377         }
2378
2379 #ifdef RTE_LIBRTE_PDUMP
2380         /* initialize packet capture framework */
2381         rte_pdump_init(NULL);
2382 #endif
2383
2384         nb_ports = (portid_t) rte_eth_dev_count();
2385         if (nb_ports == 0)
2386                 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2387
2388         /* allocate port structures, and init them */
2389         init_port();
2390
2391         set_def_fwd_config();
2392         if (nb_lcores == 0)
2393                 rte_panic("Empty set of forwarding logical cores - check the "
2394                           "core mask supplied in the command parameters\n");
2395
2396         /* Bitrate/latency stats disabled by default */
2397 #ifdef RTE_LIBRTE_BITRATE
2398         bitrate_enabled = 0;
2399 #endif
2400 #ifdef RTE_LIBRTE_LATENCY_STATS
2401         latencystats_enabled = 0;
2402 #endif
2403
2404         argc -= diag;
2405         argv += diag;
2406         if (argc > 1)
2407                 launch_args_parse(argc, argv);
2408
2409         if (tx_first && interactive)
2410                 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2411                                 "interactive mode.\n");
2412
2413         if (tx_first && lsc_interrupt) {
2414                 printf("Warning: lsc_interrupt needs to be off when "
2415                                 "using tx_first. Disabling.\n");
2416                 lsc_interrupt = 0;
2417         }
2418
2419         if (!nb_rxq && !nb_txq)
2420                 printf("Warning: Either rx or tx queues should be non-zero\n");
2421
2422         if (nb_rxq > 1 && nb_rxq > nb_txq)
2423                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2424                        "but nb_txq=%d will prevent it from being fully tested.\n",
2425                        nb_rxq, nb_txq);
2426
2427         init_config();
2428         if (start_port(RTE_PORT_ALL) != 0)
2429                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2430
2431         /* set all ports to promiscuous mode by default */
2432         RTE_ETH_FOREACH_DEV(port_id)
2433                 rte_eth_promiscuous_enable(port_id);
2434
2435         /* Init metrics library */
2436         rte_metrics_init(rte_socket_id());
2437
2438 #ifdef RTE_LIBRTE_LATENCY_STATS
2439         if (latencystats_enabled != 0) {
2440                 int ret = rte_latencystats_init(1, NULL);
2441                 if (ret)
2442                         printf("Warning: latencystats init()"
2443                                 " returned error %d\n", ret);
2444                 printf("Latencystats running on lcore %d\n",
2445                         latencystats_lcore_id);
2446         }
2447 #endif
2448
2449         /* Setup bitrate stats */
2450 #ifdef RTE_LIBRTE_BITRATE
2451         if (bitrate_enabled != 0) {
2452                 bitrate_data = rte_stats_bitrate_create();
2453                 if (bitrate_data == NULL)
2454                         rte_exit(EXIT_FAILURE,
2455                                 "Could not allocate bitrate data.\n");
2456                 rte_stats_bitrate_reg(bitrate_data);
2457         }
2458 #endif
2459
2460 #ifdef RTE_LIBRTE_CMDLINE
2461         if (strlen(cmdline_filename) != 0)
2462                 cmdline_read_from_file(cmdline_filename);
2463
2464         if (interactive == 1) {
2465                 if (auto_start) {
2466                         printf("Start automatic packet forwarding\n");
2467                         start_packet_forwarding(0);
2468                 }
2469                 prompt();
2470                 pmd_test_exit();
2471         } else
2472 #endif
2473         {
2474                 char c;
2475                 int rc;
2476
2477                 f_quit = 0;
2478
2479                 printf("No commandline core given, start packet forwarding\n");
2480                 start_packet_forwarding(tx_first);
2481                 if (stats_period != 0) {
2482                         uint64_t prev_time = 0, cur_time, diff_time = 0;
2483                         uint64_t timer_period;
2484
2485                         /* Convert to number of cycles */
2486                         timer_period = stats_period * rte_get_timer_hz();
2487
2488                         while (f_quit == 0) {
2489                                 cur_time = rte_get_timer_cycles();
2490                                 diff_time += cur_time - prev_time;
2491
2492                                 if (diff_time >= timer_period) {
2493                                         print_stats();
2494                                         /* Reset the timer */
2495                                         diff_time = 0;
2496                                 }
2497                                 /* Sleep to avoid unnecessary checks */
2498                                 prev_time = cur_time;
2499                                 sleep(1);
2500                         }
2501                 }
2502
2503                 printf("Press enter to exit\n");
2504                 rc = read(0, &c, 1);
2505                 pmd_test_exit();
2506                 if (rc < 0)
2507                         return 1;
2508         }
2509
2510         return 0;
2511 }