57a6f895c2b7efd4af1ca19aca80cac106a41a7f
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/mman.h>
42 #include <sys/types.h>
43 #include <errno.h>
44
45 #include <sys/queue.h>
46 #include <sys/stat.h>
47
48 #include <stdint.h>
49 #include <unistd.h>
50 #include <inttypes.h>
51
52 #include <rte_common.h>
53 #include <rte_errno.h>
54 #include <rte_byteorder.h>
55 #include <rte_log.h>
56 #include <rte_debug.h>
57 #include <rte_cycles.h>
58 #include <rte_memory.h>
59 #include <rte_memcpy.h>
60 #include <rte_memzone.h>
61 #include <rte_launch.h>
62 #include <rte_eal.h>
63 #include <rte_alarm.h>
64 #include <rte_per_lcore.h>
65 #include <rte_lcore.h>
66 #include <rte_atomic.h>
67 #include <rte_branch_prediction.h>
68 #include <rte_mempool.h>
69 #include <rte_malloc.h>
70 #include <rte_mbuf.h>
71 #include <rte_interrupts.h>
72 #include <rte_pci.h>
73 #include <rte_ether.h>
74 #include <rte_ethdev.h>
75 #include <rte_dev.h>
76 #include <rte_string_fns.h>
77 #ifdef RTE_LIBRTE_IXGBE_PMD
78 #include <rte_pmd_ixgbe.h>
79 #endif
80 #ifdef RTE_LIBRTE_PDUMP
81 #include <rte_pdump.h>
82 #endif
83 #include <rte_flow.h>
84 #include <rte_metrics.h>
85 #ifdef RTE_LIBRTE_BITRATE
86 #include <rte_bitrate.h>
87 #endif
88 #ifdef RTE_LIBRTE_LATENCY_STATS
89 #include <rte_latencystats.h>
90 #endif
91
92 #include "testpmd.h"
93
94 uint16_t verbose_level = 0; /**< Silent by default. */
95
96 /* use master core for command line ? */
97 uint8_t interactive = 0;
98 uint8_t auto_start = 0;
99 uint8_t tx_first;
100 char cmdline_filename[PATH_MAX] = {0};
101
102 /*
103  * NUMA support configuration.
104  * When set, the NUMA support attempts to dispatch the allocation of the
105  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
106  * probed ports among the CPU sockets 0 and 1.
107  * Otherwise, all memory is allocated from CPU socket 0.
108  */
109 uint8_t numa_support = 1; /**< numa enabled by default */
110
111 /*
112  * In UMA mode,all memory is allocated from socket 0 if --socket-num is
113  * not configured.
114  */
115 uint8_t socket_num = UMA_NO_CONFIG;
116
117 /*
118  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
119  */
120 uint8_t mp_anon = 0;
121
122 /*
123  * Record the Ethernet address of peer target ports to which packets are
124  * forwarded.
125  * Must be instantiated with the ethernet addresses of peer traffic generator
126  * ports.
127  */
128 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
129 portid_t nb_peer_eth_addrs = 0;
130
131 /*
132  * Probed Target Environment.
133  */
134 struct rte_port *ports;        /**< For all probed ethernet ports. */
135 portid_t nb_ports;             /**< Number of probed ethernet ports. */
136 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
137 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
138
139 /*
140  * Test Forwarding Configuration.
141  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
142  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
143  */
144 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
145 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
146 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
147 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
148
149 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
150 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
151
152 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
153 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
154
155 /*
156  * Forwarding engines.
157  */
158 struct fwd_engine * fwd_engines[] = {
159         &io_fwd_engine,
160         &mac_fwd_engine,
161         &mac_swap_engine,
162         &flow_gen_engine,
163         &rx_only_engine,
164         &tx_only_engine,
165         &csum_fwd_engine,
166         &icmp_echo_engine,
167 #ifdef RTE_LIBRTE_IEEE1588
168         &ieee1588_fwd_engine,
169 #endif
170         NULL,
171 };
172
173 struct fwd_config cur_fwd_config;
174 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
175 uint32_t retry_enabled;
176 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
177 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
178
179 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
180 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
181                                       * specified on command-line. */
182 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
183 /*
184  * Configuration of packet segments used by the "txonly" processing engine.
185  */
186 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
187 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
188         TXONLY_DEF_PACKET_LEN,
189 };
190 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
191
192 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
193 /**< Split policy for packets to TX. */
194
195 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
196 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
197
198 /* current configuration is in DCB or not,0 means it is not in DCB mode */
199 uint8_t dcb_config = 0;
200
201 /* Whether the dcb is in testing status */
202 uint8_t dcb_test = 0;
203
204 /*
205  * Configurable number of RX/TX queues.
206  */
207 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
208 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
209
210 /*
211  * Configurable number of RX/TX ring descriptors.
212  */
213 #define RTE_TEST_RX_DESC_DEFAULT 128
214 #define RTE_TEST_TX_DESC_DEFAULT 512
215 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
216 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
217
218 #define RTE_PMD_PARAM_UNSET -1
219 /*
220  * Configurable values of RX and TX ring threshold registers.
221  */
222
223 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
224 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
225 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
226
227 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
228 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
229 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
230
231 /*
232  * Configurable value of RX free threshold.
233  */
234 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
235
236 /*
237  * Configurable value of RX drop enable.
238  */
239 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
240
241 /*
242  * Configurable value of TX free threshold.
243  */
244 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
245
246 /*
247  * Configurable value of TX RS bit threshold.
248  */
249 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
250
251 /*
252  * Configurable value of TX queue flags.
253  */
254 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
255
256 /*
257  * Receive Side Scaling (RSS) configuration.
258  */
259 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
260
261 /*
262  * Port topology configuration
263  */
264 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
265
266 /*
267  * Avoids to flush all the RX streams before starts forwarding.
268  */
269 uint8_t no_flush_rx = 0; /* flush by default */
270
271 /*
272  * Flow API isolated mode.
273  */
274 uint8_t flow_isolate_all;
275
276 /*
277  * Avoids to check link status when starting/stopping a port.
278  */
279 uint8_t no_link_check = 0; /* check by default */
280
281 /*
282  * Enable link status change notification
283  */
284 uint8_t lsc_interrupt = 1; /* enabled by default */
285
286 /*
287  * Enable device removal notification.
288  */
289 uint8_t rmv_interrupt = 1; /* enabled by default */
290
291 /*
292  * Display or mask ether events
293  * Default to all events except VF_MBOX
294  */
295 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
296                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
297                             (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
298                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
299                             (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
300                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
301
302 /*
303  * NIC bypass mode configuration options.
304  */
305
306 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
307 /* The NIC bypass watchdog timeout. */
308 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
309 #endif
310
311
312 #ifdef RTE_LIBRTE_LATENCY_STATS
313
314 /*
315  * Set when latency stats is enabled in the commandline
316  */
317 uint8_t latencystats_enabled;
318
319 /*
320  * Lcore ID to serve latency statistics.
321  */
322 lcoreid_t latencystats_lcore_id = -1;
323
324 #endif
325
326 /*
327  * Ethernet device configuration.
328  */
329 struct rte_eth_rxmode rx_mode = {
330         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
331         .split_hdr_size = 0,
332         .header_split   = 0, /**< Header Split disabled. */
333         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
334         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
335         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
336         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
337         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
338         .hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
339 };
340
341 struct rte_fdir_conf fdir_conf = {
342         .mode = RTE_FDIR_MODE_NONE,
343         .pballoc = RTE_FDIR_PBALLOC_64K,
344         .status = RTE_FDIR_REPORT_STATUS,
345         .mask = {
346                 .vlan_tci_mask = 0x0,
347                 .ipv4_mask     = {
348                         .src_ip = 0xFFFFFFFF,
349                         .dst_ip = 0xFFFFFFFF,
350                 },
351                 .ipv6_mask     = {
352                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
353                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
354                 },
355                 .src_port_mask = 0xFFFF,
356                 .dst_port_mask = 0xFFFF,
357                 .mac_addr_byte_mask = 0xFF,
358                 .tunnel_type_mask = 1,
359                 .tunnel_id_mask = 0xFFFFFFFF,
360         },
361         .drop_queue = 127,
362 };
363
364 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
365
366 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
367 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
368
369 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
370 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
371
372 uint16_t nb_tx_queue_stats_mappings = 0;
373 uint16_t nb_rx_queue_stats_mappings = 0;
374
375 unsigned int num_sockets = 0;
376 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
377
378 #ifdef RTE_LIBRTE_BITRATE
379 /* Bitrate statistics */
380 struct rte_stats_bitrates *bitrate_data;
381 lcoreid_t bitrate_lcore_id;
382 uint8_t bitrate_enabled;
383 #endif
384
385 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
386 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
387
388 /* Forward function declarations */
389 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
390 static void check_all_ports_link_status(uint32_t port_mask);
391 static int eth_event_callback(portid_t port_id,
392                               enum rte_eth_event_type type,
393                               void *param, void *ret_param);
394
395 /*
396  * Check if all the ports are started.
397  * If yes, return positive value. If not, return zero.
398  */
399 static int all_ports_started(void);
400
401 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
402 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
403
404 /*
405  * Helper function to check if socket is already discovered.
406  * If yes, return positive value. If not, return zero.
407  */
408 int
409 new_socket_id(unsigned int socket_id)
410 {
411         unsigned int i;
412
413         for (i = 0; i < num_sockets; i++) {
414                 if (socket_ids[i] == socket_id)
415                         return 0;
416         }
417         return 1;
418 }
419
420 /*
421  * Setup default configuration.
422  */
423 static void
424 set_default_fwd_lcores_config(void)
425 {
426         unsigned int i;
427         unsigned int nb_lc;
428         unsigned int sock_num;
429
430         nb_lc = 0;
431         for (i = 0; i < RTE_MAX_LCORE; i++) {
432                 sock_num = rte_lcore_to_socket_id(i);
433                 if (new_socket_id(sock_num)) {
434                         if (num_sockets >= RTE_MAX_NUMA_NODES) {
435                                 rte_exit(EXIT_FAILURE,
436                                          "Total sockets greater than %u\n",
437                                          RTE_MAX_NUMA_NODES);
438                         }
439                         socket_ids[num_sockets++] = sock_num;
440                 }
441                 if (!rte_lcore_is_enabled(i))
442                         continue;
443                 if (i == rte_get_master_lcore())
444                         continue;
445                 fwd_lcores_cpuids[nb_lc++] = i;
446         }
447         nb_lcores = (lcoreid_t) nb_lc;
448         nb_cfg_lcores = nb_lcores;
449         nb_fwd_lcores = 1;
450 }
451
452 static void
453 set_def_peer_eth_addrs(void)
454 {
455         portid_t i;
456
457         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
458                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
459                 peer_eth_addrs[i].addr_bytes[5] = i;
460         }
461 }
462
463 static void
464 set_default_fwd_ports_config(void)
465 {
466         portid_t pt_id;
467
468         for (pt_id = 0; pt_id < nb_ports; pt_id++)
469                 fwd_ports_ids[pt_id] = pt_id;
470
471         nb_cfg_ports = nb_ports;
472         nb_fwd_ports = nb_ports;
473 }
474
/*
 * Install the default forwarding configuration: lcores, peer MAC
 * addresses and the forwarding port set, in that order (the lcore pass
 * also discovers the NUMA sockets used later by pool allocation).
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
482
/*
 * Configuration initialisation done once at init time.
 * Create the mbuf pool for @socket_id holding @nb_mbuf buffers with
 * @mbuf_seg_size bytes of data room each.  Exits the application when
 * pool creation fails; optionally dumps the pool in verbose mode.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		/*
		 * Anonymous-memory mode: build the pool manually so it is
		 * backed by anonymous mappings instead of hugepage memzones.
		 */
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		/* populate_anon() reports failure as zero objects added. */
		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

	/* Success also falls through here; the NULL check decides. */
err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
531
532 /*
533  * Check given socket id is valid or not with NUMA mode,
534  * if valid, return 0, else return -1
535  */
536 static int
537 check_socket_id(const unsigned int socket_id)
538 {
539         static int warning_once = 0;
540
541         if (new_socket_id(socket_id)) {
542                 if (!warning_once && numa_support)
543                         printf("Warning: NUMA should be configured manually by"
544                                " using --port-numa-config and"
545                                " --ring-numa-config parameters along with"
546                                " --numa.\n");
547                 warning_once = 1;
548                 return -1;
549         }
550         return 0;
551 }
552
/*
 * One-time initialisation of testpmd runtime state: per-lcore forwarding
 * contexts, per-socket mbuf pools, GSO/GRO contexts and the forwarding
 * streams.  Exits the application on any allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;

	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);

	/* Start from "no NUMA placement configured" for ports and rings. */
	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			/* Count ports per socket (used for NUMA placement). */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* Worst case: full descriptor rings + caches + one burst. */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		/* One pool per discovered NUMA socket. */
		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* Fall back to the socket-0 pool when none is local. */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}
689
690
691 void
692 reconfig(portid_t new_port_id, unsigned socket_id)
693 {
694         struct rte_port *port;
695
696         /* Reconfiguration of Ethernet ports. */
697         port = &ports[new_port_id];
698         rte_eth_dev_info_get(new_port_id, &port->dev_info);
699
700         /* set flag to initialize port/queue */
701         port->need_reconfig = 1;
702         port->need_reconfig_queues = 1;
703         port->socket_id = socket_id;
704
705         init_port_config();
706 }
707
708
/*
 * (Re)allocate the forwarding stream array: one stream per port per
 * queue (max of nb_rxq/nb_txq).  Also assigns each port's socket id
 * according to the NUMA configuration.  Returns 0 on success, -1 when
 * the configured queue counts exceed a device's limits or are zero.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	/* Nothing to do when the stream count is unchanged. */
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
789
790 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
791 static void
792 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
793 {
794         unsigned int total_burst;
795         unsigned int nb_burst;
796         unsigned int burst_stats[3];
797         uint16_t pktnb_stats[3];
798         uint16_t nb_pkt;
799         int burst_percent[3];
800
801         /*
802          * First compute the total number of packet bursts and the
803          * two highest numbers of bursts of the same number of packets.
804          */
805         total_burst = 0;
806         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
807         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
808         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
809                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
810                 if (nb_burst == 0)
811                         continue;
812                 total_burst += nb_burst;
813                 if (nb_burst > burst_stats[0]) {
814                         burst_stats[1] = burst_stats[0];
815                         pktnb_stats[1] = pktnb_stats[0];
816                         burst_stats[0] = nb_burst;
817                         pktnb_stats[0] = nb_pkt;
818                 }
819         }
820         if (total_burst == 0)
821                 return;
822         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
823         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
824                burst_percent[0], (int) pktnb_stats[0]);
825         if (burst_stats[0] == total_burst) {
826                 printf("]\n");
827                 return;
828         }
829         if (burst_stats[0] + burst_stats[1] == total_burst) {
830                 printf(" + %d%% of %d pkts]\n",
831                        100 - burst_percent[0], pktnb_stats[1]);
832                 return;
833         }
834         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
835         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
836         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
837                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
838                 return;
839         }
840         printf(" + %d%% of %d pkts + %d%% of others]\n",
841                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
842 }
843 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
844
/*
 * Display the forwarding statistics of one port: RX/TX packet, drop and
 * error counters taken from *stats*, plus the software counters kept in
 * the corresponding rte_port entry (tx_dropped, bad checksum counts).
 * Two output layouts are used: a compact one when no RX/TX queue-stats
 * mapping is enabled on the port, and a wide, column-aligned one that
 * matches the per-queue rows printed below it otherwise.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	/* Compact layout: no queue-stats mapping active on this port. */
	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		/* Checksum counters are only meaningful in csum fwd mode. */
		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		/* Only show error lines when something actually went wrong. */
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		/* Wide layout: columns aligned with the per-queue rows below. */
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/* Burst-size histograms of the streams attached to this port. */
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	/* Per-queue counters, shown only when a stats mapping is active. */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
927
/*
 * Display the statistics of one forwarding stream: RX/TX packet counts,
 * forwarding drops, checksum-error counters (in csum mode only) and,
 * when compiled in, the RX/TX burst-size histograms.  Streams that saw
 * no traffic at all are silently skipped.
 */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	/* Nothing to report for a completely idle stream. */
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
956
957 static void
958 flush_fwd_rx_queues(void)
959 {
960         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
961         portid_t  rxp;
962         portid_t port_id;
963         queueid_t rxq;
964         uint16_t  nb_rx;
965         uint16_t  i;
966         uint8_t   j;
967         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
968         uint64_t timer_period;
969
970         /* convert to number of cycles */
971         timer_period = rte_get_timer_hz(); /* 1 second timeout */
972
973         for (j = 0; j < 2; j++) {
974                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
975                         for (rxq = 0; rxq < nb_rxq; rxq++) {
976                                 port_id = fwd_ports_ids[rxp];
977                                 /**
978                                 * testpmd can stuck in the below do while loop
979                                 * if rte_eth_rx_burst() always returns nonzero
980                                 * packets. So timer is added to exit this loop
981                                 * after 1sec timer expiry.
982                                 */
983                                 prev_tsc = rte_rdtsc();
984                                 do {
985                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
986                                                 pkts_burst, MAX_PKT_BURST);
987                                         for (i = 0; i < nb_rx; i++)
988                                                 rte_pktmbuf_free(pkts_burst[i]);
989
990                                         cur_tsc = rte_rdtsc();
991                                         diff_tsc = cur_tsc - prev_tsc;
992                                         timer_tsc += diff_tsc;
993                                 } while ((nb_rx > 0) &&
994                                         (timer_tsc < timer_period));
995                                 timer_tsc = 0;
996                         }
997                 }
998                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
999         }
1000 }
1001
/*
 * Main forwarding loop of one logical core: repeatedly apply the packet
 * forwarding callback 'pkt_fwd' to every stream assigned to 'fc' until
 * fc->stopped is set by the control thread.  When the corresponding
 * features are compiled in and enabled, the designated lcores also drive
 * the periodic bitrate and latency statistics updates from this loop.
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* First stream of this lcore and how many streams it owns. */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		/* Only the bitrate lcore recomputes the per-port bitrates,
		 * roughly once per second. */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
						idx_port < cnt_ports;
						idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		/* Latency stats are refreshed by their dedicated lcore only. */
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
1046
1047 static int
1048 start_pkt_forward_on_core(void *fwd_arg)
1049 {
1050         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1051                              cur_fwd_config.fwd_eng->packet_fwd);
1052         return 0;
1053 }
1054
1055 /*
1056  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1057  * Used to start communication flows in network loopback test configurations.
1058  */
1059 static int
1060 run_one_txonly_burst_on_core(void *fwd_arg)
1061 {
1062         struct fwd_lcore *fwd_lc;
1063         struct fwd_lcore tmp_lcore;
1064
1065         fwd_lc = (struct fwd_lcore *) fwd_arg;
1066         tmp_lcore = *fwd_lc;
1067         tmp_lcore.stopped = 1;
1068         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1069         return 0;
1070 }
1071
1072 /*
1073  * Launch packet forwarding:
1074  *     - Setup per-port forwarding context.
1075  *     - launch logical cores with their forwarding configuration.
1076  */
1077 static void
1078 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1079 {
1080         port_fwd_begin_t port_fwd_begin;
1081         unsigned int i;
1082         unsigned int lc_id;
1083         int diag;
1084
1085         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1086         if (port_fwd_begin != NULL) {
1087                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1088                         (*port_fwd_begin)(fwd_ports_ids[i]);
1089         }
1090         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1091                 lc_id = fwd_lcores_cpuids[i];
1092                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1093                         fwd_lcores[i]->stopped = 0;
1094                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1095                                                      fwd_lcores[i], lc_id);
1096                         if (diag != 0)
1097                                 printf("launch lcore %u failed - diag=%d\n",
1098                                        lc_id, diag);
1099                 }
1100         }
1101 }
1102
1103 /*
1104  * Launch packet forwarding configuration.
1105  */
1106 void
1107 start_packet_forwarding(int with_tx_first)
1108 {
1109         port_fwd_begin_t port_fwd_begin;
1110         port_fwd_end_t  port_fwd_end;
1111         struct rte_port *port;
1112         unsigned int i;
1113         portid_t   pt_id;
1114         streamid_t sm_id;
1115
1116         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1117                 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1118
1119         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1120                 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1121
1122         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1123                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1124                 (!nb_rxq || !nb_txq))
1125                 rte_exit(EXIT_FAILURE,
1126                         "Either rxq or txq are 0, cannot use %s fwd mode\n",
1127                         cur_fwd_eng->fwd_mode_name);
1128
1129         if (all_ports_started() == 0) {
1130                 printf("Not all ports were started\n");
1131                 return;
1132         }
1133         if (test_done == 0) {
1134                 printf("Packet forwarding already started\n");
1135                 return;
1136         }
1137
1138         if (init_fwd_streams() < 0) {
1139                 printf("Fail from init_fwd_streams()\n");
1140                 return;
1141         }
1142
1143         if(dcb_test) {
1144                 for (i = 0; i < nb_fwd_ports; i++) {
1145                         pt_id = fwd_ports_ids[i];
1146                         port = &ports[pt_id];
1147                         if (!port->dcb_flag) {
1148                                 printf("In DCB mode, all forwarding ports must "
1149                                        "be configured in this mode.\n");
1150                                 return;
1151                         }
1152                 }
1153                 if (nb_fwd_lcores == 1) {
1154                         printf("In DCB mode,the nb forwarding cores "
1155                                "should be larger than 1.\n");
1156                         return;
1157                 }
1158         }
1159         test_done = 0;
1160
1161         if(!no_flush_rx)
1162                 flush_fwd_rx_queues();
1163
1164         fwd_config_setup();
1165         pkt_fwd_config_display(&cur_fwd_config);
1166         rxtx_config_display();
1167
1168         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1169                 pt_id = fwd_ports_ids[i];
1170                 port = &ports[pt_id];
1171                 rte_eth_stats_get(pt_id, &port->stats);
1172                 port->tx_dropped = 0;
1173
1174                 map_port_queue_stats_mapping_registers(pt_id, port);
1175         }
1176         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1177                 fwd_streams[sm_id]->rx_packets = 0;
1178                 fwd_streams[sm_id]->tx_packets = 0;
1179                 fwd_streams[sm_id]->fwd_dropped = 0;
1180                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1181                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1182
1183 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1184                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1185                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1186                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1187                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1188 #endif
1189 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1190                 fwd_streams[sm_id]->core_cycles = 0;
1191 #endif
1192         }
1193         if (with_tx_first) {
1194                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1195                 if (port_fwd_begin != NULL) {
1196                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1197                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1198                 }
1199                 while (with_tx_first--) {
1200                         launch_packet_forwarding(
1201                                         run_one_txonly_burst_on_core);
1202                         rte_eal_mp_wait_lcore();
1203                 }
1204                 port_fwd_end = tx_only_engine.port_fwd_end;
1205                 if (port_fwd_end != NULL) {
1206                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1207                                 (*port_fwd_end)(fwd_ports_ids[i]);
1208                 }
1209         }
1210         launch_packet_forwarding(start_pkt_forward_on_core);
1211 }
1212
/*
 * Stop a running packet forwarding session: signal every forwarding
 * lcore to stop, wait for completion, run the engine's per-port end
 * hook, then aggregate and display per-stream, per-port and global
 * forwarding statistics.  HW counters are reported as deltas against
 * the snapshot taken in start_packet_forwarding().
 */
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	/* Let the forwarding engine tear down each port, if it wants to. */
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	/* Fold each stream's software counters into its ports.  With more
	 * streams than ports the per-stream stats are printed here and the
	 * port->stream back-pointers cleared; otherwise the single stream
	 * is attached to its ports for the per-port display. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		/* Subtract the start-of-run snapshot so only this run's
		 * traffic is reported, then clear the snapshot. */
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
1350
1351 void
1352 dev_set_link_up(portid_t pid)
1353 {
1354         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1355                 printf("\nSet link up fail.\n");
1356 }
1357
1358 void
1359 dev_set_link_down(portid_t pid)
1360 {
1361         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1362                 printf("\nSet link down fail.\n");
1363 }
1364
1365 static int
1366 all_ports_started(void)
1367 {
1368         portid_t pi;
1369         struct rte_port *port;
1370
1371         RTE_ETH_FOREACH_DEV(pi) {
1372                 port = &ports[pi];
1373                 /* Check if there is a port which is not started */
1374                 if ((port->port_status != RTE_PORT_STARTED) &&
1375                         (port->slave_flag == 0))
1376                         return 0;
1377         }
1378
1379         /* No port is not started */
1380         return 1;
1381 }
1382
1383 int
1384 all_ports_stopped(void)
1385 {
1386         portid_t pi;
1387         struct rte_port *port;
1388
1389         RTE_ETH_FOREACH_DEV(pi) {
1390                 port = &ports[pi];
1391                 if ((port->port_status != RTE_PORT_STOPPED) &&
1392                         (port->slave_flag == 0))
1393                         return 0;
1394         }
1395
1396         return 1;
1397 }
1398
1399 int
1400 port_is_started(portid_t port_id)
1401 {
1402         if (port_id_is_invalid(port_id, ENABLED_WARN))
1403                 return 0;
1404
1405         if (ports[port_id].port_status != RTE_PORT_STARTED)
1406                 return 0;
1407
1408         return 1;
1409 }
1410
1411 static int
1412 port_is_closed(portid_t port_id)
1413 {
1414         if (port_id_is_invalid(port_id, ENABLED_WARN))
1415                 return 0;
1416
1417         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1418                 return 0;
1419
1420         return 1;
1421 }
1422
1423 int
1424 start_port(portid_t pid)
1425 {
1426         int diag, need_check_link_status = -1;
1427         portid_t pi;
1428         queueid_t qi;
1429         struct rte_port *port;
1430         struct ether_addr mac_addr;
1431         enum rte_eth_event_type event_type;
1432
1433         if (port_id_is_invalid(pid, ENABLED_WARN))
1434                 return 0;
1435
1436         if(dcb_config)
1437                 dcb_test = 1;
1438         RTE_ETH_FOREACH_DEV(pi) {
1439                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1440                         continue;
1441
1442                 need_check_link_status = 0;
1443                 port = &ports[pi];
1444                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1445                                                  RTE_PORT_HANDLING) == 0) {
1446                         printf("Port %d is now not stopped\n", pi);
1447                         continue;
1448                 }
1449
1450                 if (port->need_reconfig > 0) {
1451                         port->need_reconfig = 0;
1452
1453                         if (flow_isolate_all) {
1454                                 int ret = port_flow_isolate(pi, 1);
1455                                 if (ret) {
1456                                         printf("Failed to apply isolated"
1457                                                " mode on port %d\n", pi);
1458                                         return -1;
1459                                 }
1460                         }
1461
1462                         printf("Configuring Port %d (socket %u)\n", pi,
1463                                         port->socket_id);
1464                         /* configure port */
1465                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1466                                                 &(port->dev_conf));
1467                         if (diag != 0) {
1468                                 if (rte_atomic16_cmpset(&(port->port_status),
1469                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1470                                         printf("Port %d can not be set back "
1471                                                         "to stopped\n", pi);
1472                                 printf("Fail to configure port %d\n", pi);
1473                                 /* try to reconfigure port next time */
1474                                 port->need_reconfig = 1;
1475                                 return -1;
1476                         }
1477                 }
1478                 if (port->need_reconfig_queues > 0) {
1479                         port->need_reconfig_queues = 0;
1480                         /* setup tx queues */
1481                         for (qi = 0; qi < nb_txq; qi++) {
1482                                 if ((numa_support) &&
1483                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1484                                         diag = rte_eth_tx_queue_setup(pi, qi,
1485                                                 nb_txd,txring_numa[pi],
1486                                                 &(port->tx_conf));
1487                                 else
1488                                         diag = rte_eth_tx_queue_setup(pi, qi,
1489                                                 nb_txd,port->socket_id,
1490                                                 &(port->tx_conf));
1491
1492                                 if (diag == 0)
1493                                         continue;
1494
1495                                 /* Fail to setup tx queue, return */
1496                                 if (rte_atomic16_cmpset(&(port->port_status),
1497                                                         RTE_PORT_HANDLING,
1498                                                         RTE_PORT_STOPPED) == 0)
1499                                         printf("Port %d can not be set back "
1500                                                         "to stopped\n", pi);
1501                                 printf("Fail to configure port %d tx queues\n", pi);
1502                                 /* try to reconfigure queues next time */
1503                                 port->need_reconfig_queues = 1;
1504                                 return -1;
1505                         }
1506                         /* setup rx queues */
1507                         for (qi = 0; qi < nb_rxq; qi++) {
1508                                 if ((numa_support) &&
1509                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1510                                         struct rte_mempool * mp =
1511                                                 mbuf_pool_find(rxring_numa[pi]);
1512                                         if (mp == NULL) {
1513                                                 printf("Failed to setup RX queue:"
1514                                                         "No mempool allocation"
1515                                                         " on the socket %d\n",
1516                                                         rxring_numa[pi]);
1517                                                 return -1;
1518                                         }
1519
1520                                         diag = rte_eth_rx_queue_setup(pi, qi,
1521                                              nb_rxd,rxring_numa[pi],
1522                                              &(port->rx_conf),mp);
1523                                 } else {
1524                                         struct rte_mempool *mp =
1525                                                 mbuf_pool_find(port->socket_id);
1526                                         if (mp == NULL) {
1527                                                 printf("Failed to setup RX queue:"
1528                                                         "No mempool allocation"
1529                                                         " on the socket %d\n",
1530                                                         port->socket_id);
1531                                                 return -1;
1532                                         }
1533                                         diag = rte_eth_rx_queue_setup(pi, qi,
1534                                              nb_rxd,port->socket_id,
1535                                              &(port->rx_conf), mp);
1536                                 }
1537                                 if (diag == 0)
1538                                         continue;
1539
1540                                 /* Fail to setup rx queue, return */
1541                                 if (rte_atomic16_cmpset(&(port->port_status),
1542                                                         RTE_PORT_HANDLING,
1543                                                         RTE_PORT_STOPPED) == 0)
1544                                         printf("Port %d can not be set back "
1545                                                         "to stopped\n", pi);
1546                                 printf("Fail to configure port %d rx queues\n", pi);
1547                                 /* try to reconfigure queues next time */
1548                                 port->need_reconfig_queues = 1;
1549                                 return -1;
1550                         }
1551                 }
1552
1553                 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1554                      event_type < RTE_ETH_EVENT_MAX;
1555                      event_type++) {
1556                         diag = rte_eth_dev_callback_register(pi,
1557                                                         event_type,
1558                                                         eth_event_callback,
1559                                                         NULL);
1560                         if (diag) {
1561                                 printf("Failed to setup even callback for event %d\n",
1562                                         event_type);
1563                                 return -1;
1564                         }
1565                 }
1566
1567                 /* start port */
1568                 if (rte_eth_dev_start(pi) < 0) {
1569                         printf("Fail to start port %d\n", pi);
1570
1571                         /* Fail to setup rx queue, return */
1572                         if (rte_atomic16_cmpset(&(port->port_status),
1573                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1574                                 printf("Port %d can not be set back to "
1575                                                         "stopped\n", pi);
1576                         continue;
1577                 }
1578
1579                 if (rte_atomic16_cmpset(&(port->port_status),
1580                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1581                         printf("Port %d can not be set into started\n", pi);
1582
1583                 rte_eth_macaddr_get(pi, &mac_addr);
1584                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1585                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1586                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1587                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1588
1589                 /* at least one port started, need checking link status */
1590                 need_check_link_status = 1;
1591         }
1592
1593         if (need_check_link_status == 1 && !no_link_check)
1594                 check_all_ports_link_status(RTE_PORT_ALL);
1595         else if (need_check_link_status == 0)
1596                 printf("Please stop the ports first\n");
1597
1598         printf("Done\n");
1599         return 0;
1600 }
1601
1602 void
1603 stop_port(portid_t pid)
1604 {
1605         portid_t pi;
1606         struct rte_port *port;
1607         int need_check_link_status = 0;
1608
1609         if (dcb_test) {
1610                 dcb_test = 0;
1611                 dcb_config = 0;
1612         }
1613
1614         if (port_id_is_invalid(pid, ENABLED_WARN))
1615                 return;
1616
1617         printf("Stopping ports...\n");
1618
1619         RTE_ETH_FOREACH_DEV(pi) {
1620                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1621                         continue;
1622
1623                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1624                         printf("Please remove port %d from forwarding configuration.\n", pi);
1625                         continue;
1626                 }
1627
1628                 if (port_is_bonding_slave(pi)) {
1629                         printf("Please remove port %d from bonded device.\n", pi);
1630                         continue;
1631                 }
1632
1633                 port = &ports[pi];
1634                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1635                                                 RTE_PORT_HANDLING) == 0)
1636                         continue;
1637
1638                 rte_eth_dev_stop(pi);
1639
1640                 if (rte_atomic16_cmpset(&(port->port_status),
1641                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1642                         printf("Port %d can not be set into stopped\n", pi);
1643                 need_check_link_status = 1;
1644         }
1645         if (need_check_link_status && !no_link_check)
1646                 check_all_ports_link_status(RTE_PORT_ALL);
1647
1648         printf("Done\n");
1649 }
1650
/*
 * Close the given port, or every port when pid == RTE_PORT_ALL.
 * A port must be stopped, out of the forwarding configuration and not
 * a bonding slave before it can be closed; offending ports are skipped
 * with a message. Any flow rules are flushed before the device close.
 */
void
close_port(portid_t pid)
{
        portid_t pi;
        struct rte_port *port;

        if (port_id_is_invalid(pid, ENABLED_WARN))
                return;

        printf("Closing ports...\n");

        RTE_ETH_FOREACH_DEV(pi) {
                /* Honor a single-port request. */
                if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
                        continue;

                if (port_is_forwarding(pi) != 0 && test_done == 0) {
                        printf("Please remove port %d from forwarding configuration.\n", pi);
                        continue;
                }

                if (port_is_bonding_slave(pi)) {
                        printf("Please remove port %d from bonded device.\n", pi);
                        continue;
                }

                port = &ports[pi];
                /* CLOSED -> CLOSED compare-and-set is used here as an
                 * atomic "is it already closed?" test.
                 */
                if (rte_atomic16_cmpset(&(port->port_status),
                        RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
                        printf("Port %d is already closed\n", pi);
                        continue;
                }

                /* Claim the port: only STOPPED may move to HANDLING. */
                if (rte_atomic16_cmpset(&(port->port_status),
                        RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
                        printf("Port %d is now not stopped\n", pi);
                        continue;
                }

                /* Flow rules must be released before closing the device. */
                if (port->flow_list)
                        port_flow_flush(pi);
                rte_eth_dev_close(pi);

                if (rte_atomic16_cmpset(&(port->port_status),
                        RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
                        printf("Port %d cannot be set to closed\n", pi);
        }

        printf("Done\n");
}
1700
1701 void
1702 reset_port(portid_t pid)
1703 {
1704         int diag;
1705         portid_t pi;
1706         struct rte_port *port;
1707
1708         if (port_id_is_invalid(pid, ENABLED_WARN))
1709                 return;
1710
1711         printf("Resetting ports...\n");
1712
1713         RTE_ETH_FOREACH_DEV(pi) {
1714                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1715                         continue;
1716
1717                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1718                         printf("Please remove port %d from forwarding "
1719                                "configuration.\n", pi);
1720                         continue;
1721                 }
1722
1723                 if (port_is_bonding_slave(pi)) {
1724                         printf("Please remove port %d from bonded device.\n",
1725                                pi);
1726                         continue;
1727                 }
1728
1729                 diag = rte_eth_dev_reset(pi);
1730                 if (diag == 0) {
1731                         port = &ports[pi];
1732                         port->need_reconfig = 1;
1733                         port->need_reconfig_queues = 1;
1734                 } else {
1735                         printf("Failed to reset port %d. diag=%d\n", pi, diag);
1736                 }
1737         }
1738
1739         printf("Done\n");
1740 }
1741
1742 void
1743 attach_port(char *identifier)
1744 {
1745         portid_t pi = 0;
1746         unsigned int socket_id;
1747
1748         printf("Attaching a new port...\n");
1749
1750         if (identifier == NULL) {
1751                 printf("Invalid parameters are specified\n");
1752                 return;
1753         }
1754
1755         if (rte_eth_dev_attach(identifier, &pi))
1756                 return;
1757
1758         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1759         /* if socket_id is invalid, set to 0 */
1760         if (check_socket_id(socket_id) < 0)
1761                 socket_id = 0;
1762         reconfig(pi, socket_id);
1763         rte_eth_promiscuous_enable(pi);
1764
1765         nb_ports = rte_eth_dev_count();
1766
1767         ports[pi].port_status = RTE_PORT_STOPPED;
1768
1769         printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1770         printf("Done\n");
1771 }
1772
1773 void
1774 detach_port(uint8_t port_id)
1775 {
1776         char name[RTE_ETH_NAME_MAX_LEN];
1777
1778         printf("Detaching a port...\n");
1779
1780         if (!port_is_closed(port_id)) {
1781                 printf("Please close port first\n");
1782                 return;
1783         }
1784
1785         if (ports[port_id].flow_list)
1786                 port_flow_flush(port_id);
1787
1788         if (rte_eth_dev_detach(port_id, name)) {
1789                 RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1790                 return;
1791         }
1792
1793         nb_ports = rte_eth_dev_count();
1794
1795         printf("Port '%s' is detached. Now total ports is %d\n",
1796                         name, nb_ports);
1797         printf("Done\n");
1798         return;
1799 }
1800
1801 void
1802 pmd_test_exit(void)
1803 {
1804         portid_t pt_id;
1805
1806         if (test_done == 0)
1807                 stop_packet_forwarding();
1808
1809         if (ports != NULL) {
1810                 no_link_check = 1;
1811                 RTE_ETH_FOREACH_DEV(pt_id) {
1812                         printf("\nShutting down port %d...\n", pt_id);
1813                         fflush(stdout);
1814                         stop_port(pt_id);
1815                         close_port(pt_id);
1816                 }
1817         }
1818         printf("\nBye...\n");
1819 }
1820
/* Handler type for a parameter-less test-menu command. */
typedef void (*cmd_func_t)(void);
/* Binds a command name to its handler in the PMD test menu. */
struct pmd_test_command {
        const char *cmd_name;
        cmd_func_t cmd_func;
};

/* Entry count of pmd_test_menu (table defined elsewhere in this file). */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1828
1829 /* Check the link status of all ports in up to 9s, and print them finally */
1830 static void
1831 check_all_ports_link_status(uint32_t port_mask)
1832 {
1833 #define CHECK_INTERVAL 100 /* 100ms */
1834 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1835         portid_t portid;
1836         uint8_t count, all_ports_up, print_flag = 0;
1837         struct rte_eth_link link;
1838
1839         printf("Checking link statuses...\n");
1840         fflush(stdout);
1841         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1842                 all_ports_up = 1;
1843                 RTE_ETH_FOREACH_DEV(portid) {
1844                         if ((port_mask & (1 << portid)) == 0)
1845                                 continue;
1846                         memset(&link, 0, sizeof(link));
1847                         rte_eth_link_get_nowait(portid, &link);
1848                         /* print link status if flag set */
1849                         if (print_flag == 1) {
1850                                 if (link.link_status)
1851                                         printf(
1852                                         "Port%d Link Up. speed %u Mbps- %s\n",
1853                                         portid, link.link_speed,
1854                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1855                                         ("full-duplex") : ("half-duplex\n"));
1856                                 else
1857                                         printf("Port %d Link Down\n", portid);
1858                                 continue;
1859                         }
1860                         /* clear all_ports_up flag if any link down */
1861                         if (link.link_status == ETH_LINK_DOWN) {
1862                                 all_ports_up = 0;
1863                                 break;
1864                         }
1865                 }
1866                 /* after finally printing all link status, get out */
1867                 if (print_flag == 1)
1868                         break;
1869
1870                 if (all_ports_up == 0) {
1871                         fflush(stdout);
1872                         rte_delay_ms(CHECK_INTERVAL);
1873                 }
1874
1875                 /* set the print_flag if all ports up or timeout */
1876                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1877                         print_flag = 1;
1878                 }
1879
1880                 if (lsc_interrupt)
1881                         break;
1882         }
1883 }
1884
/*
 * Deferred handler for a device-removal event: stops and closes the
 * removed port, then detaches its device from the EAL. Scheduled via
 * rte_eal_alarm_set() from eth_event_callback().
 */
static void
rmv_event_callback(void *arg)
{
        struct rte_eth_dev *dev;
        /* Port id is smuggled through the void * callback argument. */
        uint8_t port_id = (intptr_t)arg;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        stop_port(port_id);
        close_port(port_id);
        printf("removing device %s\n", dev->device->name);
        if (rte_eal_dev_detach(dev->device))
                RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
                        dev->device->name);
}
1901
/* This function is used by the interrupt thread */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
                  void *ret_param)
{
        /* Human-readable names indexed by event type, for logging. */
        static const char * const event_desc[] = {
                [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
                [RTE_ETH_EVENT_INTR_LSC] = "LSC",
                [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
                [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
                [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
                [RTE_ETH_EVENT_MACSEC] = "MACsec",
                [RTE_ETH_EVENT_INTR_RMV] = "device removal",
                [RTE_ETH_EVENT_MAX] = NULL,
        };

        RTE_SET_USED(param);
        RTE_SET_USED(ret_param);

        /* Log the event when its bit is set in event_print_mask. */
        if (type >= RTE_ETH_EVENT_MAX) {
                fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
                        port_id, __func__, type);
                fflush(stderr);
        } else if (event_print_mask & (UINT32_C(1) << type)) {
                printf("\nPort %" PRIu8 ": %s event\n", port_id,
                        event_desc[type]);
                fflush(stdout);
        }

        switch (type) {
        case RTE_ETH_EVENT_INTR_RMV:
                /* Defer the teardown to an EAL alarm so it runs outside
                 * this interrupt-thread callback.
                 */
                if (rte_eal_alarm_set(100000,
                                rmv_event_callback, (void *)(intptr_t)port_id))
                        fprintf(stderr, "Could not set up deferred device removal\n");
                break;
        default:
                break;
        }
        return 0;
}
1942
1943 static int
1944 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1945 {
1946         uint16_t i;
1947         int diag;
1948         uint8_t mapping_found = 0;
1949
1950         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1951                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1952                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1953                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1954                                         tx_queue_stats_mappings[i].queue_id,
1955                                         tx_queue_stats_mappings[i].stats_counter_id);
1956                         if (diag != 0)
1957                                 return diag;
1958                         mapping_found = 1;
1959                 }
1960         }
1961         if (mapping_found)
1962                 port->tx_queue_stats_mapping_enabled = 1;
1963         return 0;
1964 }
1965
1966 static int
1967 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1968 {
1969         uint16_t i;
1970         int diag;
1971         uint8_t mapping_found = 0;
1972
1973         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1974                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1975                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1976                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1977                                         rx_queue_stats_mappings[i].queue_id,
1978                                         rx_queue_stats_mappings[i].stats_counter_id);
1979                         if (diag != 0)
1980                                 return diag;
1981                         mapping_found = 1;
1982                 }
1983         }
1984         if (mapping_found)
1985                 port->rx_queue_stats_mapping_enabled = 1;
1986         return 0;
1987 }
1988
1989 static void
1990 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1991 {
1992         int diag = 0;
1993
1994         diag = set_tx_queue_stats_mapping_registers(pi, port);
1995         if (diag != 0) {
1996                 if (diag == -ENOTSUP) {
1997                         port->tx_queue_stats_mapping_enabled = 0;
1998                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1999                 }
2000                 else
2001                         rte_exit(EXIT_FAILURE,
2002                                         "set_tx_queue_stats_mapping_registers "
2003                                         "failed for port id=%d diag=%d\n",
2004                                         pi, diag);
2005         }
2006
2007         diag = set_rx_queue_stats_mapping_registers(pi, port);
2008         if (diag != 0) {
2009                 if (diag == -ENOTSUP) {
2010                         port->rx_queue_stats_mapping_enabled = 0;
2011                         printf("RX queue stats mapping not supported port id=%d\n", pi);
2012                 }
2013                 else
2014                         rte_exit(EXIT_FAILURE,
2015                                         "set_rx_queue_stats_mapping_registers "
2016                                         "failed for port id=%d diag=%d\n",
2017                                         pi, diag);
2018         }
2019 }
2020
2021 static void
2022 rxtx_port_config(struct rte_port *port)
2023 {
2024         port->rx_conf = port->dev_info.default_rxconf;
2025         port->tx_conf = port->dev_info.default_txconf;
2026
2027         /* Check if any RX/TX parameters have been passed */
2028         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2029                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2030
2031         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2032                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2033
2034         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2035                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2036
2037         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2038                 port->rx_conf.rx_free_thresh = rx_free_thresh;
2039
2040         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2041                 port->rx_conf.rx_drop_en = rx_drop_en;
2042
2043         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2044                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2045
2046         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2047                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2048
2049         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2050                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2051
2052         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2053                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2054
2055         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2056                 port->tx_conf.tx_free_thresh = tx_free_thresh;
2057
2058         if (txq_flags != RTE_PMD_PARAM_UNSET)
2059                 port->tx_conf.txq_flags = txq_flags;
2060 }
2061
/*
 * Build the default device configuration for every probed port:
 * RX mode, flow-director config, RSS when more than one RX queue is
 * configured, queue-stats mappings, and LSC/RMV interrupt flags when
 * both requested and advertised by the device.
 */
void
init_port_config(void)
{
        portid_t pid;
        struct rte_port *port;

        RTE_ETH_FOREACH_DEV(pid) {
                port = &ports[pid];
                port->dev_conf.rxmode = rx_mode;
                port->dev_conf.fdir_conf = fdir_conf;
                /* RSS only makes sense with multiple RX queues. */
                if (nb_rxq > 1) {
                        port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
                        port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
                } else {
                        port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
                        port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
                }

                /* DCB-enabled ports keep the mq_mode chosen by the DCB
                 * setup (see init_port_dcb_config()).
                 */
                if (port->dcb_flag == 0) {
                        if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
                                port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
                        else
                                port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
                }

                rxtx_port_config(port);

                rte_eth_macaddr_get(pid, &port->eth_addr);

                map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
                rte_pmd_ixgbe_bypass_init(pid);
#endif

                /* Enable interrupts only when the device advertises
                 * support for them in dev_flags.
                 */
                if (lsc_interrupt &&
                    (rte_eth_devices[pid].data->dev_flags &
                     RTE_ETH_DEV_INTR_LSC))
                        port->dev_conf.intr_conf.lsc = 1;
                if (rmv_interrupt &&
                    (rte_eth_devices[pid].data->dev_flags &
                     RTE_ETH_DEV_INTR_RMV))
                        port->dev_conf.intr_conf.rmv = 1;
        }
}
2106
2107 void set_port_slave_flag(portid_t slave_pid)
2108 {
2109         struct rte_port *port;
2110
2111         port = &ports[slave_pid];
2112         port->slave_flag = 1;
2113 }
2114
2115 void clear_port_slave_flag(portid_t slave_pid)
2116 {
2117         struct rte_port *port;
2118
2119         port = &ports[slave_pid];
2120         port->slave_flag = 0;
2121 }
2122
2123 uint8_t port_is_bonding_slave(portid_t slave_pid)
2124 {
2125         struct rte_port *port;
2126
2127         port = &ports[slave_pid];
2128         return port->slave_flag;
2129 }
2130
/* VLAN IDs used to populate the VMDQ+DCB pool map and the DCB VLAN
 * filter (see get_eth_dcb_conf() and init_port_dcb_config()).
 */
const uint16_t vlan_tags[] = {
                0,  1,  2,  3,  4,  5,  6,  7,
                8,  9, 10, 11,  12, 13, 14, 15,
                16, 17, 18, 19, 20, 21, 22, 23,
                24, 25, 26, 27, 28, 29, 30, 31
};
2137
/*
 * Fill 'eth_conf' for DCB operation. With DCB_VT_ENABLED the port is
 * set up for VMDQ+DCB using the vlan_tags table above; otherwise plain
 * DCB (with RSS on RX) is configured. 'pfc_en' additionally enables
 * priority flow control capability. Always returns 0.
 */
static  int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
                 enum dcb_mode_enable dcb_mode,
                 enum rte_eth_nb_tcs num_tcs,
                 uint8_t pfc_en)
{
        uint8_t i;

        /*
         * Builds up the correct configuration for dcb+vt based on the vlan tags array
         * given above, and the number of traffic classes available for use.
         */
        if (dcb_mode == DCB_VT_ENABLED) {
                struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
                                &eth_conf->rx_adv_conf.vmdq_dcb_conf;
                struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
                                &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

                /* VMDQ+DCB RX and TX configurations */
                vmdq_rx_conf->enable_default_pool = 0;
                vmdq_rx_conf->default_pool = 0;
                /* 4 TCs -> 32 pools, otherwise (8 TCs) -> 16 pools. */
                vmdq_rx_conf->nb_queue_pools =
                        (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
                vmdq_tx_conf->nb_queue_pools =
                        (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

                /* One pool map entry per pool: VLAN tag i steers to the
                 * pool selected by i modulo the pool count.
                 */
                vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
                for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
                        vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
                        vmdq_rx_conf->pool_map[i].pools =
                                1 << (i % vmdq_rx_conf->nb_queue_pools);
                }
                /* Identity mapping of user priority -> traffic class. */
                for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
                        vmdq_rx_conf->dcb_tc[i] = i;
                        vmdq_tx_conf->dcb_tc[i] = i;
                }

                /* set DCB mode of RX and TX of multiple queues */
                eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
                eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
        } else {
                struct rte_eth_dcb_rx_conf *rx_conf =
                                &eth_conf->rx_adv_conf.dcb_rx_conf;
                struct rte_eth_dcb_tx_conf *tx_conf =
                                &eth_conf->tx_adv_conf.dcb_tx_conf;

                rx_conf->nb_tcs = num_tcs;
                tx_conf->nb_tcs = num_tcs;

                /* Spread user priorities round-robin over the TCs. */
                for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
                        rx_conf->dcb_tc[i] = i % num_tcs;
                        tx_conf->dcb_tc[i] = i % num_tcs;
                }
                eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
                eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
                eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
        }

        if (pfc_en)
                eth_conf->dcb_capability_en =
                                ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
        else
                eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

        return 0;
}
2204
/*
 * Switch port 'pid' into DCB mode. Builds a DCB rte_eth_conf (see
 * get_eth_dcb_conf()), applies it with zero queues to fetch updated
 * device info, derives the global nb_rxq/nb_txq from the device
 * capabilities, enables VLAN filtering for the vlan_tags table and
 * flags the port as DCB-configured. Returns 0 on success, negative on
 * error.
 */
int
init_port_dcb_config(portid_t pid,
                     enum dcb_mode_enable dcb_mode,
                     enum rte_eth_nb_tcs num_tcs,
                     uint8_t pfc_en)
{
        struct rte_eth_conf port_conf;
        struct rte_port *rte_port;
        int retval;
        uint16_t i;

        rte_port = &ports[pid];

        memset(&port_conf, 0, sizeof(struct rte_eth_conf));
        /* Enter DCB configuration status */
        dcb_config = 1;

        /*set configuration of DCB in vt mode and DCB in non-vt mode*/
        retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
        if (retval < 0)
                return retval;
        port_conf.rxmode.hw_vlan_filter = 1;

        /**
         * Write the configuration into the device.
         * Set the numbers of RX & TX queues to 0, so
         * the RX & TX queues will not be setup.
         */
        rte_eth_dev_configure(pid, 0, 0, &port_conf);

        rte_eth_dev_info_get(pid, &rte_port->dev_info);

        /* If dev_info.vmdq_pool_base is greater than 0,
         * the queue id of vmdq pools is started after pf queues.
         */
        if (dcb_mode == DCB_VT_ENABLED &&
            rte_port->dev_info.vmdq_pool_base > 0) {
                printf("VMDQ_DCB multi-queue mode is nonsensical"
                        " for port %d.", pid);
                return -1;
        }

        /* Assume the ports in testpmd have the same dcb capability
         * and has the same number of rxq and txq in dcb mode
         */
        if (dcb_mode == DCB_VT_ENABLED) {
                if (rte_port->dev_info.max_vfs > 0) {
                        /* VFs consume queues; use the currently
                         * configured counts reported by the device.
                         */
                        nb_rxq = rte_port->dev_info.nb_rx_queues;
                        nb_txq = rte_port->dev_info.nb_tx_queues;
                } else {
                        nb_rxq = rte_port->dev_info.max_rx_queues;
                        nb_txq = rte_port->dev_info.max_tx_queues;
                }
        } else {
                /*if vt is disabled, use all pf queues */
                if (rte_port->dev_info.vmdq_pool_base == 0) {
                        nb_rxq = rte_port->dev_info.max_rx_queues;
                        nb_txq = rte_port->dev_info.max_tx_queues;
                } else {
                        /* One queue per traffic class. */
                        nb_rxq = (queueid_t)num_tcs;
                        nb_txq = (queueid_t)num_tcs;

                }
        }
        rx_free_thresh = 64;

        memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

        rxtx_port_config(rte_port);
        /* VLAN filter */
        rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
        for (i = 0; i < RTE_DIM(vlan_tags); i++)
                rx_vft_set(pid, vlan_tags[i], 1);

        rte_eth_macaddr_get(pid, &rte_port->eth_addr);
        map_port_queue_stats_mapping_registers(pid, rte_port);

        rte_port->dcb_flag = 1;

        return 0;
}
2286
2287 static void
2288 init_port(void)
2289 {
2290         /* Configuration of Ethernet ports. */
2291         ports = rte_zmalloc("testpmd: ports",
2292                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2293                             RTE_CACHE_LINE_SIZE);
2294         if (ports == NULL) {
2295                 rte_exit(EXIT_FAILURE,
2296                                 "rte_zmalloc(%d struct rte_port) failed\n",
2297                                 RTE_MAX_ETHPORTS);
2298         }
2299 }
2300
/*
 * Common shutdown path for fatal signals: close all ports via
 * pmd_test_exit(), then terminate the interactive prompt.
 */
static void
force_quit(void)
{
        pmd_test_exit();
        prompt_exit();
}
2307
2308 static void
2309 print_stats(void)
2310 {
2311         uint8_t i;
2312         const char clr[] = { 27, '[', '2', 'J', '\0' };
2313         const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2314
2315         /* Clear screen and move to top left */
2316         printf("%s%s", clr, top_left);
2317
2318         printf("\nPort statistics ====================================");
2319         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2320                 nic_stats_display(fwd_ports_ids[i]);
2321 }
2322
/*
 * SIGINT/SIGTERM handler: tear down pdump/latency stats and all ports,
 * then re-raise the signal with the default disposition so the process
 * exit status reflects the signal. Other signals are ignored.
 * NOTE(review): printf()/force_quit() are not async-signal-safe;
 * presumably acceptable since the process is exiting — confirm.
 */
static void
signal_handler(int signum)
{
        if (signum == SIGINT || signum == SIGTERM) {
                printf("\nSignal %d received, preparing to exit...\n",
                                signum);
#ifdef RTE_LIBRTE_PDUMP
                /* uninitialize packet capture framework */
                rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
                rte_latencystats_uninit();
#endif
                force_quit();
                /* exit with the expected status */
                signal(signum, SIG_DFL);
                kill(getpid(), signum);
        }
}
2342
/*
 * testpmd entry point.
 *
 * Initialization order matters and is deliberate:
 *   1. install signal handlers (so SIGINT/SIGTERM during init are caught),
 *   2. init the EAL, lock memory, init packet capture,
 *   3. count probed ports and init port structures,
 *   4. parse application-level arguments (everything after the EAL args),
 *   5. configure and start all ports,
 *   6. run either the interactive prompt or the forwarding loop.
 *
 * Returns 0 on normal exit, 1 if the final read() on stdin failed;
 * fatal setup errors terminate via rte_panic()/rte_exit() instead.
 */
2343 int
2344 main(int argc, char** argv)
2345 {
2346         int  diag;
2347         portid_t port_id;
2348
             /* Handlers installed before rte_eal_init() so an early Ctrl-C
              * still goes through the cleanup path in signal_handler(). */
2349         signal(SIGINT, signal_handler);
2350         signal(SIGTERM, signal_handler);
2351
             /* On success, diag is the number of argv entries the EAL
              * consumed; it is used below to skip past the EAL arguments. */
2352         diag = rte_eal_init(argc, argv);
2353         if (diag < 0)
2354                 rte_panic("Cannot init EAL\n");
2355
             /* Pin current and future pages in RAM; failure is only logged
              * (presumably to avoid datapath page faults — non-fatal). */
2356         if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2357                 RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
2358                         strerror(errno));
2359         }
2360
2361 #ifdef RTE_LIBRTE_PDUMP
2362         /* initialize packet capture framework */
2363         rte_pdump_init(NULL);
2364 #endif
2365
             /* Zero probed ports is a warning, not an error: testpmd can
              * still start (e.g. ports may be attached interactively). */
2366         nb_ports = (portid_t) rte_eth_dev_count();
2367         if (nb_ports == 0)
2368                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2369
2370         /* allocate port structures, and init them */
2371         init_port();
2372
             /* Build the default forwarding config; nb_lcores is derived from
              * the EAL core mask, so an empty set means a bad -c/-l argument. */
2373         set_def_fwd_config();
2374         if (nb_lcores == 0)
2375                 rte_panic("Empty set of forwarding logical cores - check the "
2376                           "core mask supplied in the command parameters\n");
2377
2378         /* Bitrate/latency stats disabled by default */
2379 #ifdef RTE_LIBRTE_BITRATE
2380         bitrate_enabled = 0;
2381 #endif
2382 #ifdef RTE_LIBRTE_LATENCY_STATS
2383         latencystats_enabled = 0;
2384 #endif
2385
             /* Skip the EAL arguments; what remains (after the "--" separator)
              * are testpmd's own options. */
2386         argc -= diag;
2387         argv += diag;
2388         if (argc > 1)
2389                 launch_args_parse(argc, argv);
2390
             /* --tx-first only makes sense in non-interactive mode. */
2391         if (tx_first && interactive)
2392                 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
2393                                 "interactive mode.\n");
2394
             /* tx_first and link-state-change interrupts conflict; silently
              * prefer tx_first and disable LSC with a warning. */
2395         if (tx_first && lsc_interrupt) {
2396                 printf("Warning: lsc_interrupt needs to be off when "
2397                                 " using tx_first. Disabling.\n");
2398                 lsc_interrupt = 0;
2399         }
2400
2401         if (!nb_rxq && !nb_txq)
2402                 printf("Warning: Either rx or tx queues should be non-zero\n");
2403
             /* Multiple RX queues imply RSS; warn when too few TX queues
              * exist to exercise it fully. */
2404         if (nb_rxq > 1 && nb_rxq > nb_txq)
2405                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2406                        "but nb_txq=%d will prevent to fully test it.\n",
2407                        nb_rxq, nb_txq);
2408
             /* Apply the parsed configuration and bring up every port;
              * a port that fails to start is fatal. */
2409         init_config();
2410         if (start_port(RTE_PORT_ALL) != 0)
2411                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2412
2413         /* set all ports to promiscuous mode by default */
2414         RTE_ETH_FOREACH_DEV(port_id)
2415                 rte_eth_promiscuous_enable(port_id);
2416
2417         /* Init metrics library */
2418         rte_metrics_init(rte_socket_id());
2419
2420 #ifdef RTE_LIBRTE_LATENCY_STATS
             /* Latency stats init failure is only a warning; the "running on
              * lcore" message is printed regardless of ret. */
2421         if (latencystats_enabled != 0) {
2422                 int ret = rte_latencystats_init(1, NULL);
2423                 if (ret)
2424                         printf("Warning: latencystats init()"
2425                                 " returned error %d\n", ret);
2426                 printf("Latencystats running on lcore %d\n",
2427                         latencystats_lcore_id);
2428         }
2429 #endif
2430
2431         /* Setup bitrate stats */
2432 #ifdef RTE_LIBRTE_BITRATE
2433         if (bitrate_enabled != 0) {
2434                 bitrate_data = rte_stats_bitrate_create();
2435                 if (bitrate_data == NULL)
2436                         rte_exit(EXIT_FAILURE,
2437                                 "Could not allocate bitrate data.\n");
2438                 rte_stats_bitrate_reg(bitrate_data);
2439         }
2440 #endif
2441
2442 #ifdef RTE_LIBRTE_CMDLINE
             /* Optionally replay a command file, then either drop into the
              * interactive prompt or fall through to batch forwarding.
              * NOTE: the "} else" below pairs with the "{" after #endif, so
              * without RTE_LIBRTE_CMDLINE the batch branch always runs. */
2443         if (strlen(cmdline_filename) != 0)
2444                 cmdline_read_from_file(cmdline_filename);
2445
2446         if (interactive == 1) {
2447                 if (auto_start) {
2448                         printf("Start automatic packet forwarding\n");
2449                         start_packet_forwarding(0);
2450                 }
2451                 prompt();
2452                 pmd_test_exit();
2453         } else
2454 #endif
2455         {
2456                 char c;
2457                 int rc;
2458
2459                 printf("No commandline core given, start packet forwarding\n");
2460                 start_packet_forwarding(tx_first);
                     /* With --stats-period, loop forever printing stats every
                      * stats_period seconds; exit only happens via signal. */
2461                 if (stats_period != 0) {
2462                         uint64_t prev_time = 0, cur_time, diff_time = 0;
2463                         uint64_t timer_period;
2464
2465                         /* Convert to number of cycles */
2466                         timer_period = stats_period * rte_get_timer_hz();
2467
2468                         while (1) {
2469                                 cur_time = rte_get_timer_cycles();
                                     /* First pass: prev_time is 0, so diff_time
                                      * jumps past timer_period and stats print
                                      * almost immediately. */
2470                                 diff_time += cur_time - prev_time;
2471
2472                                 if (diff_time >= timer_period) {
2473                                         print_stats();
2474                                         /* Reset the timer */
2475                                         diff_time = 0;
2476                                 }
2477                                 /* Sleep to avoid unnecessary checks */
2478                                 prev_time = cur_time;
2479                                 sleep(1);
2480                         }
2481                 }
2482
                     /* No stats period: block until the user presses a key,
                      * then shut down; pmd_test_exit() runs before the rc
                      * check so cleanup happens even if read() failed. */
2483                 printf("Press enter to exit\n");
2484                 rc = read(0, &c, 1);
2485                 pmd_test_exit();
2486                 if (rc < 0)
2487                         return 1;
2488         }
2489
2490         return 0;
2491 }