667c2286f6d02f50db7abf63f582295fafd152ab
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/mman.h>
42 #include <sys/types.h>
43 #include <errno.h>
44
45 #include <sys/queue.h>
46 #include <sys/stat.h>
47
48 #include <stdint.h>
49 #include <unistd.h>
50 #include <inttypes.h>
51
52 #include <rte_common.h>
53 #include <rte_errno.h>
54 #include <rte_byteorder.h>
55 #include <rte_log.h>
56 #include <rte_debug.h>
57 #include <rte_cycles.h>
58 #include <rte_memory.h>
59 #include <rte_memcpy.h>
60 #include <rte_memzone.h>
61 #include <rte_launch.h>
62 #include <rte_eal.h>
63 #include <rte_alarm.h>
64 #include <rte_per_lcore.h>
65 #include <rte_lcore.h>
66 #include <rte_atomic.h>
67 #include <rte_branch_prediction.h>
68 #include <rte_mempool.h>
69 #include <rte_malloc.h>
70 #include <rte_mbuf.h>
71 #include <rte_interrupts.h>
72 #include <rte_pci.h>
73 #include <rte_ether.h>
74 #include <rte_ethdev.h>
75 #include <rte_dev.h>
76 #include <rte_string_fns.h>
77 #ifdef RTE_LIBRTE_IXGBE_PMD
78 #include <rte_pmd_ixgbe.h>
79 #endif
80 #ifdef RTE_LIBRTE_PDUMP
81 #include <rte_pdump.h>
82 #endif
83 #include <rte_flow.h>
84 #include <rte_metrics.h>
85 #ifdef RTE_LIBRTE_BITRATE
86 #include <rte_bitrate.h>
87 #endif
88 #ifdef RTE_LIBRTE_LATENCY_STATS
89 #include <rte_latencystats.h>
90 #endif
91
92 #include "testpmd.h"
93
94 uint16_t verbose_level = 0; /**< Silent by default. */
95
96 /* use master core for command line ? */
97 uint8_t interactive = 0;
98 uint8_t auto_start = 0;
99 uint8_t tx_first;
100 char cmdline_filename[PATH_MAX] = {0};
101
102 /*
103  * NUMA support configuration.
104  * When set, the NUMA support attempts to dispatch the allocation of the
105  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
106  * probed ports, among the CPU sockets to which those ports are attached.
107  * Otherwise, all memory is allocated from CPU socket 0.
108  */
109 uint8_t numa_support = 1; /**< numa enabled by default */
110
111 /*
112  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
113  * not configured.
114  */
115 uint8_t socket_num = UMA_NO_CONFIG;
116
117 /*
118  * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
119  */
120 uint8_t mp_anon = 0;
121
122 /*
123  * Record the Ethernet addresses of the peer target ports to which packets
124  * are forwarded.
125  * Must be instantiated with the Ethernet addresses of the peer traffic
126  * generator ports.
127  */
128 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
129 portid_t nb_peer_eth_addrs = 0;
130
131 /*
132  * Probed Target Environment.
133  */
134 struct rte_port *ports;        /**< For all probed ethernet ports. */
135 portid_t nb_ports;             /**< Number of probed ethernet ports. */
136 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
137 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
138
139 /*
140  * Test Forwarding Configuration.
141  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
142  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
143  */
144 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
145 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
146 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
147 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
148
149 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
150 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
151
152 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
153 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
154
155 /*
156  * Forwarding engines.
157  */
158 struct fwd_engine * fwd_engines[] = {
159         &io_fwd_engine,
160         &mac_fwd_engine,
161         &mac_swap_engine,
162         &flow_gen_engine,
163         &rx_only_engine,
164         &tx_only_engine,
165         &csum_fwd_engine,
166         &icmp_echo_engine,
167 #ifdef RTE_LIBRTE_IEEE1588
168         &ieee1588_fwd_engine,
169 #endif
170         NULL,
171 };
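/*
 * Illustrative sketch (not part of the original file): fwd_engines[] above is
 * a NULL-terminated table, so a forwarding engine can be looked up by its
 * mode name with a simple linear scan. The helper name below is hypothetical;
 * testpmd's own mode-selection code is not shown in this excerpt.
 */
static inline struct fwd_engine *
fwd_engine_by_name(const char *name)
{
        unsigned int i;

        for (i = 0; fwd_engines[i] != NULL; i++)
                if (strcmp(fwd_engines[i]->fwd_mode_name, name) == 0)
                        return fwd_engines[i];
        return NULL; /* unknown forwarding mode */
}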
172
173 struct fwd_config cur_fwd_config;
174 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
175 uint32_t retry_enabled;
176 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
177 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
178
179 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
180 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
181                                       * specified on command-line. */
182 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
183
184 /*
185  * In a container, the process running with the 'stats-period' option cannot be
186  * terminated interactively; this flag makes the stats-period loop exit after SIGINT/SIGTERM.
187  */
188 uint8_t f_quit;
189
190 /*
191  * Configuration of packet segments used by the "txonly" processing engine.
192  */
193 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
194 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
195         TXONLY_DEF_PACKET_LEN,
196 };
197 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
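/*
 * Illustrative sketch (not part of the original file): the three "txonly"
 * variables above are only consistent when the configured segment lengths
 * add up to tx_pkt_length. The hypothetical helper below expresses that
 * invariant; testpmd's command-line handling of these values is not shown here.
 */
static inline int
txonly_segments_are_consistent(void)
{
        uint32_t total = 0;
        uint8_t i;

        for (i = 0; i < tx_pkt_nb_segs; i++)
                total += tx_pkt_seg_lengths[i];
        return total == (uint32_t)tx_pkt_length;
}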
198
199 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
200 /**< Split policy for packets to TX. */
201
202 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
203 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
204
205 /* Whether the current configuration is in DCB mode; 0 means it is not */
206 uint8_t dcb_config = 0;
207
208 /* Whether DCB is currently under test */
209 uint8_t dcb_test = 0;
210
211 /*
212  * Configurable number of RX/TX queues.
213  */
214 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
215 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
216
217 /*
218  * Configurable number of RX/TX ring descriptors.
219  */
220 #define RTE_TEST_RX_DESC_DEFAULT 128
221 #define RTE_TEST_TX_DESC_DEFAULT 512
222 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
223 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
224
225 #define RTE_PMD_PARAM_UNSET -1
226 /*
227  * Configurable values of RX and TX ring threshold registers.
228  */
229
230 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
231 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
232 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
233
234 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
235 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
236 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
237
238 /*
239  * Configurable value of RX free threshold.
240  */
241 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
242
243 /*
244  * Configurable value of RX drop enable.
245  */
246 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
247
248 /*
249  * Configurable value of TX free threshold.
250  */
251 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
252
253 /*
254  * Configurable value of TX RS bit threshold.
255  */
256 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
257
258 /*
259  * Configurable value of TX queue flags.
260  */
261 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
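/*
 * Illustrative sketch (not part of the original file): a value left at
 * RTE_PMD_PARAM_UNSET keeps the PMD default, so only explicitly configured
 * thresholds are meant to be copied into the RX queue configuration. This is
 * a minimal sketch of that pattern, using a hypothetical helper name.
 */
static inline void
apply_rx_thresh_config(struct rte_eth_rxconf *rxconf)
{
        if (rx_pthresh != RTE_PMD_PARAM_UNSET)
                rxconf->rx_thresh.pthresh = rx_pthresh;
        if (rx_hthresh != RTE_PMD_PARAM_UNSET)
                rxconf->rx_thresh.hthresh = rx_hthresh;
        if (rx_wthresh != RTE_PMD_PARAM_UNSET)
                rxconf->rx_thresh.wthresh = rx_wthresh;
        if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
                rxconf->rx_free_thresh = rx_free_thresh;
        if (rx_drop_en != RTE_PMD_PARAM_UNSET)
                rxconf->rx_drop_en = rx_drop_en;
}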
262
263 /*
264  * Receive Side Scaling (RSS) configuration.
265  */
266 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
267
268 /*
269  * Port topology configuration
270  */
271 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
272
273 /*
274  * Avoid flushing all the RX streams before starting forwarding.
275  */
276 uint8_t no_flush_rx = 0; /* flush by default */
277
278 /*
279  * Flow API isolated mode.
280  */
281 uint8_t flow_isolate_all;
282
283 /*
284  * Avoid checking the link status when starting/stopping a port.
285  */
286 uint8_t no_link_check = 0; /* check by default */
287
288 /*
289  * Enable link status change notification
290  */
291 uint8_t lsc_interrupt = 1; /* enabled by default */
292
293 /*
294  * Enable device removal notification.
295  */
296 uint8_t rmv_interrupt = 1; /* enabled by default */
297
298 /*
299  * Display or mask Ethernet device events.
300  * Defaults to all events except VF_MBOX.
301  */
302 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
303                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
304                             (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
305                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
306                             (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
307                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
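/*
 * Illustrative sketch (not part of the original file): event handlers are
 * expected to test this mask bit-wise before printing an event, so the
 * default above prints every event type except RTE_ETH_EVENT_VF_MBOX.
 * The helper name is hypothetical.
 */
static inline int
event_should_be_printed(enum rte_eth_event_type type)
{
        return (event_print_mask & (UINT32_C(1) << type)) != 0;
}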
308
309 /*
310  * NIC bypass mode configuration options.
311  */
312
313 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
314 /* The NIC bypass watchdog timeout. */
315 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
316 #endif
317
318
319 #ifdef RTE_LIBRTE_LATENCY_STATS
320
321 /*
322  * Set when latency stats are enabled on the command line.
323  */
324 uint8_t latencystats_enabled;
325
326 /*
327  * Lcore ID that services latency statistics.
328  */
329 lcoreid_t latencystats_lcore_id = -1;
330
331 #endif
332
333 /*
334  * Ethernet device configuration.
335  */
336 struct rte_eth_rxmode rx_mode = {
337         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
338         .split_hdr_size = 0,
339         .header_split   = 0, /**< Header Split disabled. */
340         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
341         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
342         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
343         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
344         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
345         .hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
346 };
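/*
 * Illustrative sketch (not part of the original file): enabling jumbo frames
 * at run time means updating both related fields of rx_mode consistently.
 * The helper name and the example length are hypothetical.
 */
static inline void
enable_jumbo_frames(uint32_t max_len)
{
        rx_mode.jumbo_frame = 1;      /* turn on jumbo frame support */
        rx_mode.max_rx_pkt_len = max_len; /* e.g. enable_jumbo_frames(9600) */
}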
347
348 struct rte_fdir_conf fdir_conf = {
349         .mode = RTE_FDIR_MODE_NONE,
350         .pballoc = RTE_FDIR_PBALLOC_64K,
351         .status = RTE_FDIR_REPORT_STATUS,
352         .mask = {
353                 .vlan_tci_mask = 0x0,
354                 .ipv4_mask     = {
355                         .src_ip = 0xFFFFFFFF,
356                         .dst_ip = 0xFFFFFFFF,
357                 },
358                 .ipv6_mask     = {
359                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
360                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
361                 },
362                 .src_port_mask = 0xFFFF,
363                 .dst_port_mask = 0xFFFF,
364                 .mac_addr_byte_mask = 0xFF,
365                 .tunnel_type_mask = 1,
366                 .tunnel_id_mask = 0xFFFFFFFF,
367         },
368         .drop_queue = 127,
369 };
370
371 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
372
373 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
374 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
375
376 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
377 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
378
379 uint16_t nb_tx_queue_stats_mappings = 0;
380 uint16_t nb_rx_queue_stats_mappings = 0;
381
382 unsigned int num_sockets = 0;
383 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
384
385 #ifdef RTE_LIBRTE_BITRATE
386 /* Bitrate statistics */
387 struct rte_stats_bitrates *bitrate_data;
388 lcoreid_t bitrate_lcore_id;
389 uint8_t bitrate_enabled;
390 #endif
391
392 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
393 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
394
395 /* Forward function declarations */
396 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
397 static void check_all_ports_link_status(uint32_t port_mask);
398 static int eth_event_callback(portid_t port_id,
399                               enum rte_eth_event_type type,
400                               void *param, void *ret_param);
401
402 /*
403  * Check if all the ports are started.
404  * If yes, return positive value. If not, return zero.
405  */
406 static int all_ports_started(void);
407
408 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
409 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
410
411 /*
412  * Helper function to check whether a socket has not been discovered yet.
413  * Return a positive value if the socket id is new, zero if it is already known.
414  */
415 int
416 new_socket_id(unsigned int socket_id)
417 {
418         unsigned int i;
419
420         for (i = 0; i < num_sockets; i++) {
421                 if (socket_ids[i] == socket_id)
422                         return 0;
423         }
424         return 1;
425 }
426
427 /*
428  * Setup default configuration.
429  */
430 static void
431 set_default_fwd_lcores_config(void)
432 {
433         unsigned int i;
434         unsigned int nb_lc;
435         unsigned int sock_num;
436
437         nb_lc = 0;
438         for (i = 0; i < RTE_MAX_LCORE; i++) {
439                 sock_num = rte_lcore_to_socket_id(i);
440                 if (new_socket_id(sock_num)) {
441                         if (num_sockets >= RTE_MAX_NUMA_NODES) {
442                                 rte_exit(EXIT_FAILURE,
443                                          "Total sockets greater than %u\n",
444                                          RTE_MAX_NUMA_NODES);
445                         }
446                         socket_ids[num_sockets++] = sock_num;
447                 }
448                 if (!rte_lcore_is_enabled(i))
449                         continue;
450                 if (i == rte_get_master_lcore())
451                         continue;
452                 fwd_lcores_cpuids[nb_lc++] = i;
453         }
454         nb_lcores = (lcoreid_t) nb_lc;
455         nb_cfg_lcores = nb_lcores;
456         nb_fwd_lcores = 1;
457 }
458
459 static void
460 set_def_peer_eth_addrs(void)
461 {
462         portid_t i;
463
464         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
465                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
466                 peer_eth_addrs[i].addr_bytes[5] = i;
467         }
468 }
469
470 static void
471 set_default_fwd_ports_config(void)
472 {
473         portid_t pt_id;
474         int i = 0;
475
476         RTE_ETH_FOREACH_DEV(pt_id)
477                 fwd_ports_ids[i++] = pt_id;
478
479         nb_cfg_ports = nb_ports;
480         nb_fwd_ports = nb_ports;
481 }
482
483 void
484 set_def_fwd_config(void)
485 {
486         set_default_fwd_lcores_config();
487         set_def_peer_eth_addrs();
488         set_default_fwd_ports_config();
489 }
490
491 /*
492  * Create an mbuf pool for the given socket, done once at init time.
493  */
494 static void
495 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
496                  unsigned int socket_id)
497 {
498         char pool_name[RTE_MEMPOOL_NAMESIZE];
499         struct rte_mempool *rte_mp = NULL;
500         uint32_t mb_size;
501
502         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
503         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
504
505         RTE_LOG(INFO, USER1,
506                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
507                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
508
509         if (mp_anon != 0) {
510                 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
511                         mb_size, (unsigned) mb_mempool_cache,
512                         sizeof(struct rte_pktmbuf_pool_private),
513                         socket_id, 0);
514                 if (rte_mp == NULL)
515                         goto err;
516
517                 if (rte_mempool_populate_anon(rte_mp) == 0) {
518                         rte_mempool_free(rte_mp);
519                         rte_mp = NULL;
520                         goto err;
521                 }
522                 rte_pktmbuf_pool_init(rte_mp, NULL);
523                 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
524         } else {
525                 /* wrapper to rte_mempool_create() */
526                 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
527                         mb_mempool_cache, 0, mbuf_seg_size, socket_id);
528         }
529
530 err:
531         if (rte_mp == NULL) {
532                 rte_exit(EXIT_FAILURE,
533                         "Creation of mbuf pool for socket %u failed: %s\n",
534                         socket_id, rte_strerror(rte_errno));
535         } else if (verbose_level > 0) {
536                 rte_mempool_dump(stdout, rte_mp);
537         }
538 }
539
540 /*
541  * Check whether the given socket id is valid in NUMA mode.
542  * Return 0 if valid, -1 otherwise.
543  */
544 static int
545 check_socket_id(const unsigned int socket_id)
546 {
547         static int warning_once = 0;
548
549         if (new_socket_id(socket_id)) {
550                 if (!warning_once && numa_support)
551                         printf("Warning: NUMA should be configured manually by"
552                                " using --port-numa-config and"
553                                " --ring-numa-config parameters along with"
554                                " --numa.\n");
555                 warning_once = 1;
556                 return -1;
557         }
558         return 0;
559 }
560
561 static void
562 init_config(void)
563 {
564         portid_t pid;
565         struct rte_port *port;
566         struct rte_mempool *mbp;
567         unsigned int nb_mbuf_per_pool;
568         lcoreid_t  lc_id;
569         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
570         struct rte_gro_param gro_param;
571         uint32_t gso_types;
572
573         memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
574
575         if (numa_support) {
576                 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
577                 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
578                 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
579         }
580
581         /* Configuration of logical cores. */
582         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
583                                 sizeof(struct fwd_lcore *) * nb_lcores,
584                                 RTE_CACHE_LINE_SIZE);
585         if (fwd_lcores == NULL) {
586                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
587                                                         "failed\n", nb_lcores);
588         }
589         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
590                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
591                                                sizeof(struct fwd_lcore),
592                                                RTE_CACHE_LINE_SIZE);
593                 if (fwd_lcores[lc_id] == NULL) {
594                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
595                                                                 "failed\n");
596                 }
597                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
598         }
599
600         RTE_ETH_FOREACH_DEV(pid) {
601                 port = &ports[pid];
602                 rte_eth_dev_info_get(pid, &port->dev_info);
603
604                 if (numa_support) {
605                         if (port_numa[pid] != NUMA_NO_CONFIG)
606                                 port_per_socket[port_numa[pid]]++;
607                         else {
608                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
609
610                                 /* if socket_id is invalid, set to 0 */
611                                 if (check_socket_id(socket_id) < 0)
612                                         socket_id = 0;
613                                 port_per_socket[socket_id]++;
614                         }
615                 }
616
617                 /* set flag to initialize port/queue */
618                 port->need_reconfig = 1;
619                 port->need_reconfig_queues = 1;
620         }
621
622         /*
623          * Create the mbuf pools.
624          * If NUMA support is disabled, create a single mbuf pool in
625          * socket 0 memory by default.
626          * Otherwise, create an mbuf pool in the memory of each discovered socket.
627          *
628          * Size the pools for the maximum values of nb_rxd and nb_txd here, so
629          * that nb_rxd and nb_txd can still be configured at run time.
630          */
631         if (param_total_num_mbufs)
632                 nb_mbuf_per_pool = param_total_num_mbufs;
633         else {
634                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
635                         (nb_lcores * mb_mempool_cache) +
636                         RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
637                 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
638         }
639
640         if (numa_support) {
641                 uint8_t i;
642
643                 for (i = 0; i < num_sockets; i++)
644                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
645                                          socket_ids[i]);
646         } else {
647                 if (socket_num == UMA_NO_CONFIG)
648                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
649                 else
650                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
651                                                  socket_num);
652         }
653
654         init_port_config();
655
656         gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
657                 DEV_TX_OFFLOAD_GRE_TNL_TSO;
658         /*
659          * Record which mbuf pool each logical core should use, if needed.
660          */
661         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
662                 mbp = mbuf_pool_find(
663                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
664
665                 if (mbp == NULL)
666                         mbp = mbuf_pool_find(0);
667                 fwd_lcores[lc_id]->mbp = mbp;
668                 /* initialize GSO context */
669                 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
670                 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
671                 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
672                 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
673                         ETHER_CRC_LEN;
674                 fwd_lcores[lc_id]->gso_ctx.flag = 0;
675         }
676
677         /* Configuration of packet forwarding streams. */
678         if (init_fwd_streams() < 0)
679                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
680
681         fwd_config_setup();
682
683         /* create a gro context for each lcore */
684         gro_param.gro_types = RTE_GRO_TCP_IPV4;
685         gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
686         gro_param.max_item_per_flow = MAX_PKT_BURST;
687         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
688                 gro_param.socket_id = rte_lcore_to_socket_id(
689                                 fwd_lcores_cpuids[lc_id]);
690                 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
691                 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
692                         rte_exit(EXIT_FAILURE,
693                                         "rte_gro_ctx_create() failed\n");
694                 }
695         }
696 }
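/*
 * Illustrative sketch (not part of the original file): when
 * param_total_num_mbufs is left at zero, the default pool sizing computed in
 * init_config() above amounts to the expression below, i.e. one full RX ring,
 * one full TX ring, one packet burst and the per-lcore mempool caches, scaled
 * by the maximum number of ports. The helper name is hypothetical.
 */
static inline unsigned int
default_nb_mbuf_per_pool(void)
{
        unsigned int nb_mbuf;

        nb_mbuf = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache) +
                RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
        return nb_mbuf * RTE_MAX_ETHPORTS;
}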
697
698
699 void
700 reconfig(portid_t new_port_id, unsigned socket_id)
701 {
702         struct rte_port *port;
703
704         /* Reconfiguration of Ethernet ports. */
705         port = &ports[new_port_id];
706         rte_eth_dev_info_get(new_port_id, &port->dev_info);
707
708         /* set flag to initialize port/queue */
709         port->need_reconfig = 1;
710         port->need_reconfig_queues = 1;
711         port->socket_id = socket_id;
712
713         init_port_config();
714 }
715
716
717 int
718 init_fwd_streams(void)
719 {
720         portid_t pid;
721         struct rte_port *port;
722         streamid_t sm_id, nb_fwd_streams_new;
723         queueid_t q;
724
725         /* set the socket id depending on whether NUMA is enabled */
726         RTE_ETH_FOREACH_DEV(pid) {
727                 port = &ports[pid];
728                 if (nb_rxq > port->dev_info.max_rx_queues) {
729                         printf("Fail: nb_rxq(%d) is greater than "
730                                 "max_rx_queues(%d)\n", nb_rxq,
731                                 port->dev_info.max_rx_queues);
732                         return -1;
733                 }
734                 if (nb_txq > port->dev_info.max_tx_queues) {
735                         printf("Fail: nb_txq(%d) is greater than "
736                                 "max_tx_queues(%d)\n", nb_txq,
737                                 port->dev_info.max_tx_queues);
738                         return -1;
739                 }
740                 if (numa_support) {
741                         if (port_numa[pid] != NUMA_NO_CONFIG)
742                                 port->socket_id = port_numa[pid];
743                         else {
744                                 port->socket_id = rte_eth_dev_socket_id(pid);
745
746                                 /* if socket_id is invalid, set to 0 */
747                                 if (check_socket_id(port->socket_id) < 0)
748                                         port->socket_id = 0;
749                         }
750                 }
751                 else {
752                         if (socket_num == UMA_NO_CONFIG)
753                                 port->socket_id = 0;
754                         else
755                                 port->socket_id = socket_num;
756                 }
757         }
758
759         q = RTE_MAX(nb_rxq, nb_txq);
760         if (q == 0) {
761                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
762                 return -1;
763         }
764         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
765         if (nb_fwd_streams_new == nb_fwd_streams)
766                 return 0;
767         /* clear the old */
768         if (fwd_streams != NULL) {
769                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
770                         if (fwd_streams[sm_id] == NULL)
771                                 continue;
772                         rte_free(fwd_streams[sm_id]);
773                         fwd_streams[sm_id] = NULL;
774                 }
775                 rte_free(fwd_streams);
776                 fwd_streams = NULL;
777         }
778
779         /* init new */
780         nb_fwd_streams = nb_fwd_streams_new;
781         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
782                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
783         if (fwd_streams == NULL)
784                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
785                                                 "failed\n", nb_fwd_streams);
786
787         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
788                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
789                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
790                 if (fwd_streams[sm_id] == NULL)
791                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
792                                                                 " failed\n");
793         }
794
795         return 0;
796 }
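/*
 * Illustrative sketch (not part of the original file): init_fwd_streams()
 * above always allocates nb_ports * max(nb_rxq, nb_txq) streams; for example,
 * 4 ports with 2 RX queues and 1 TX queue give 8 forwarding streams.
 * The helper name is hypothetical.
 */
static inline streamid_t
expected_nb_fwd_streams(void)
{
        return (streamid_t)(nb_ports * RTE_MAX(nb_rxq, nb_txq));
}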
797
798 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
799 static void
800 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
801 {
802         unsigned int total_burst;
803         unsigned int nb_burst;
804         unsigned int burst_stats[3];
805         uint16_t pktnb_stats[3];
806         uint16_t nb_pkt;
807         int burst_percent[3];
808
809         /*
810          * First compute the total number of packet bursts and the
811          * two highest numbers of bursts of the same number of packets.
812          */
813         total_burst = 0;
814         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
815         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
816         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
817                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
818                 if (nb_burst == 0)
819                         continue;
820                 total_burst += nb_burst;
821                 if (nb_burst > burst_stats[0]) {
822                         burst_stats[1] = burst_stats[0];
823                         pktnb_stats[1] = pktnb_stats[0];
824                         burst_stats[0] = nb_burst;
825                         pktnb_stats[0] = nb_pkt;
826                 }
827         }
828         if (total_burst == 0)
829                 return;
830         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
831         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
832                burst_percent[0], (int) pktnb_stats[0]);
833         if (burst_stats[0] == total_burst) {
834                 printf("]\n");
835                 return;
836         }
837         if (burst_stats[0] + burst_stats[1] == total_burst) {
838                 printf(" + %d%% of %d pkts]\n",
839                        100 - burst_percent[0], pktnb_stats[1]);
840                 return;
841         }
842         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
843         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
844         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
845                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
846                 return;
847         }
848         printf(" + %d%% of %d pkts + %d%% of others]\n",
849                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
850 }
851 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
852
853 static void
854 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
855 {
856         struct rte_port *port;
857         uint8_t i;
858
859         static const char *fwd_stats_border = "----------------------";
860
861         port = &ports[port_id];
862         printf("\n  %s Forward statistics for port %-2d %s\n",
863                fwd_stats_border, port_id, fwd_stats_border);
864
865         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
866                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
867                        "%-"PRIu64"\n",
868                        stats->ipackets, stats->imissed,
869                        (uint64_t) (stats->ipackets + stats->imissed));
870
871                 if (cur_fwd_eng == &csum_fwd_engine)
872                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
873                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
874                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
875                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
876                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
877                 }
878
879                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
880                        "%-"PRIu64"\n",
881                        stats->opackets, port->tx_dropped,
882                        (uint64_t) (stats->opackets + port->tx_dropped));
883         }
884         else {
885                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
886                        "%14"PRIu64"\n",
887                        stats->ipackets, stats->imissed,
888                        (uint64_t) (stats->ipackets + stats->imissed));
889
890                 if (cur_fwd_eng == &csum_fwd_engine)
891                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
892                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
893                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
894                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
895                         printf("  RX-nombufs:             %14"PRIu64"\n",
896                                stats->rx_nombuf);
897                 }
898
899                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
900                        "%14"PRIu64"\n",
901                        stats->opackets, port->tx_dropped,
902                        (uint64_t) (stats->opackets + port->tx_dropped));
903         }
904
905 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
906         if (port->rx_stream)
907                 pkt_burst_stats_display("RX",
908                         &port->rx_stream->rx_burst_stats);
909         if (port->tx_stream)
910                 pkt_burst_stats_display("TX",
911                         &port->tx_stream->tx_burst_stats);
912 #endif
913
914         if (port->rx_queue_stats_mapping_enabled) {
915                 printf("\n");
916                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
917                         printf("  Stats reg %2d RX-packets:%14"PRIu64
918                                "     RX-errors:%14"PRIu64
919                                "    RX-bytes:%14"PRIu64"\n",
920                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
921                 }
922                 printf("\n");
923         }
924         if (port->tx_queue_stats_mapping_enabled) {
925                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
926                         printf("  Stats reg %2d TX-packets:%14"PRIu64
927                                "                                 TX-bytes:%14"PRIu64"\n",
928                                i, stats->q_opackets[i], stats->q_obytes[i]);
929                 }
930         }
931
932         printf("  %s--------------------------------%s\n",
933                fwd_stats_border, fwd_stats_border);
934 }
935
936 static void
937 fwd_stream_stats_display(streamid_t stream_id)
938 {
939         struct fwd_stream *fs;
940         static const char *fwd_top_stats_border = "-------";
941
942         fs = fwd_streams[stream_id];
943         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
944             (fs->fwd_dropped == 0))
945                 return;
946         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
947                "TX Port=%2d/Queue=%2d %s\n",
948                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
949                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
950         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
951                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
952
953         /* if checksum mode */
954         if (cur_fwd_eng == &csum_fwd_engine) {
955                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
956                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
957         }
958
959 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
960         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
961         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
962 #endif
963 }
964
965 static void
966 flush_fwd_rx_queues(void)
967 {
968         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
969         portid_t  rxp;
970         portid_t port_id;
971         queueid_t rxq;
972         uint16_t  nb_rx;
973         uint16_t  i;
974         uint8_t   j;
975         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
976         uint64_t timer_period;
977
978         /* convert to number of cycles */
979         timer_period = rte_get_timer_hz(); /* 1 second timeout */
980
981         for (j = 0; j < 2; j++) {
982                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
983                         for (rxq = 0; rxq < nb_rxq; rxq++) {
984                                 port_id = fwd_ports_ids[rxp];
985                                 /*
986                                  * testpmd can get stuck in the do-while loop below
987                                  * if rte_eth_rx_burst() keeps returning a nonzero
988                                  * number of packets, so a timer is used to exit
989                                  * the loop after a 1-second expiry.
990                                  */
991                                 prev_tsc = rte_rdtsc();
992                                 do {
993                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
994                                                 pkts_burst, MAX_PKT_BURST);
995                                         for (i = 0; i < nb_rx; i++)
996                                                 rte_pktmbuf_free(pkts_burst[i]);
997
998                                         cur_tsc = rte_rdtsc();
999                                         diff_tsc = cur_tsc - prev_tsc;
1000                                         timer_tsc += diff_tsc;
1001                                 } while ((nb_rx > 0) &&
1002                                         (timer_tsc < timer_period));
1003                                 timer_tsc = 0;
1004                         }
1005                 }
1006                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1007         }
1008 }
1009
1010 static void
1011 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1012 {
1013         struct fwd_stream **fsm;
1014         streamid_t nb_fs;
1015         streamid_t sm_id;
1016 #ifdef RTE_LIBRTE_BITRATE
1017         uint64_t tics_per_1sec;
1018         uint64_t tics_datum;
1019         uint64_t tics_current;
1020         uint8_t idx_port, cnt_ports;
1021
1022         cnt_ports = rte_eth_dev_count();
1023         tics_datum = rte_rdtsc();
1024         tics_per_1sec = rte_get_timer_hz();
1025 #endif
1026         fsm = &fwd_streams[fc->stream_idx];
1027         nb_fs = fc->stream_nb;
1028         do {
1029                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1030                         (*pkt_fwd)(fsm[sm_id]);
1031 #ifdef RTE_LIBRTE_BITRATE
1032                 if (bitrate_enabled != 0 &&
1033                                 bitrate_lcore_id == rte_lcore_id()) {
1034                         tics_current = rte_rdtsc();
1035                         if (tics_current - tics_datum >= tics_per_1sec) {
1036                                 /* Periodic bitrate calculation */
1037                                 for (idx_port = 0;
1038                                                 idx_port < cnt_ports;
1039                                                 idx_port++)
1040                                         rte_stats_bitrate_calc(bitrate_data,
1041                                                 idx_port);
1042                                 tics_datum = tics_current;
1043                         }
1044                 }
1045 #endif
1046 #ifdef RTE_LIBRTE_LATENCY_STATS
1047                 if (latencystats_enabled != 0 &&
1048                                 latencystats_lcore_id == rte_lcore_id())
1049                         rte_latencystats_update();
1050 #endif
1051
1052         } while (! fc->stopped);
1053 }
1054
1055 static int
1056 start_pkt_forward_on_core(void *fwd_arg)
1057 {
1058         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1059                              cur_fwd_config.fwd_eng->packet_fwd);
1060         return 0;
1061 }
1062
1063 /*
1064  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1065  * Used to start communication flows in network loopback test configurations.
1066  */
1067 static int
1068 run_one_txonly_burst_on_core(void *fwd_arg)
1069 {
1070         struct fwd_lcore *fwd_lc;
1071         struct fwd_lcore tmp_lcore;
1072
1073         fwd_lc = (struct fwd_lcore *) fwd_arg;
1074         tmp_lcore = *fwd_lc;
1075         tmp_lcore.stopped = 1;
1076         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1077         return 0;
1078 }
1079
1080 /*
1081  * Launch packet forwarding:
1082  *     - Setup per-port forwarding context.
1083  *     - launch logical cores with their forwarding configuration.
1084  */
1085 static void
1086 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1087 {
1088         port_fwd_begin_t port_fwd_begin;
1089         unsigned int i;
1090         unsigned int lc_id;
1091         int diag;
1092
1093         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1094         if (port_fwd_begin != NULL) {
1095                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1096                         (*port_fwd_begin)(fwd_ports_ids[i]);
1097         }
1098         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1099                 lc_id = fwd_lcores_cpuids[i];
1100                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1101                         fwd_lcores[i]->stopped = 0;
1102                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1103                                                      fwd_lcores[i], lc_id);
1104                         if (diag != 0)
1105                                 printf("launch lcore %u failed - diag=%d\n",
1106                                        lc_id, diag);
1107                 }
1108         }
1109 }
1110
1111 /*
1112  * Launch packet forwarding configuration.
1113  */
1114 void
1115 start_packet_forwarding(int with_tx_first)
1116 {
1117         port_fwd_begin_t port_fwd_begin;
1118         port_fwd_end_t  port_fwd_end;
1119         struct rte_port *port;
1120         unsigned int i;
1121         portid_t   pt_id;
1122         streamid_t sm_id;
1123
1124         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1125                 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1126
1127         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1128                 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1129
1130         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1131                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1132                 (!nb_rxq || !nb_txq))
1133                 rte_exit(EXIT_FAILURE,
1134                         "Either rxq or txq is 0, cannot use %s fwd mode\n",
1135                         cur_fwd_eng->fwd_mode_name);
1136
1137         if (all_ports_started() == 0) {
1138                 printf("Not all ports were started\n");
1139                 return;
1140         }
1141         if (test_done == 0) {
1142                 printf("Packet forwarding already started\n");
1143                 return;
1144         }
1145
1146         if (init_fwd_streams() < 0) {
1147                 printf("Fail from init_fwd_streams()\n");
1148                 return;
1149         }
1150
1151         if(dcb_test) {
1152                 for (i = 0; i < nb_fwd_ports; i++) {
1153                         pt_id = fwd_ports_ids[i];
1154                         port = &ports[pt_id];
1155                         if (!port->dcb_flag) {
1156                                 printf("In DCB mode, all forwarding ports must "
1157                                        "be configured in this mode.\n");
1158                                 return;
1159                         }
1160                 }
1161                 if (nb_fwd_lcores == 1) {
1162                         printf("In DCB mode, the number of forwarding cores "
1163                                "should be larger than 1.\n");
1164                         return;
1165                 }
1166         }
1167         test_done = 0;
1168
1169         if(!no_flush_rx)
1170                 flush_fwd_rx_queues();
1171
1172         fwd_config_setup();
1173         pkt_fwd_config_display(&cur_fwd_config);
1174         rxtx_config_display();
1175
1176         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1177                 pt_id = fwd_ports_ids[i];
1178                 port = &ports[pt_id];
1179                 rte_eth_stats_get(pt_id, &port->stats);
1180                 port->tx_dropped = 0;
1181
1182                 map_port_queue_stats_mapping_registers(pt_id, port);
1183         }
1184         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1185                 fwd_streams[sm_id]->rx_packets = 0;
1186                 fwd_streams[sm_id]->tx_packets = 0;
1187                 fwd_streams[sm_id]->fwd_dropped = 0;
1188                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1189                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1190
1191 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1192                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1193                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1194                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1195                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1196 #endif
1197 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1198                 fwd_streams[sm_id]->core_cycles = 0;
1199 #endif
1200         }
1201         if (with_tx_first) {
1202                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1203                 if (port_fwd_begin != NULL) {
1204                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1205                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1206                 }
1207                 while (with_tx_first--) {
1208                         launch_packet_forwarding(
1209                                         run_one_txonly_burst_on_core);
1210                         rte_eal_mp_wait_lcore();
1211                 }
1212                 port_fwd_end = tx_only_engine.port_fwd_end;
1213                 if (port_fwd_end != NULL) {
1214                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1215                                 (*port_fwd_end)(fwd_ports_ids[i]);
1216                 }
1217         }
1218         launch_packet_forwarding(start_pkt_forward_on_core);
1219 }
1220
1221 void
1222 stop_packet_forwarding(void)
1223 {
1224         struct rte_eth_stats stats;
1225         struct rte_port *port;
1226         port_fwd_end_t  port_fwd_end;
1227         int i;
1228         portid_t   pt_id;
1229         streamid_t sm_id;
1230         lcoreid_t  lc_id;
1231         uint64_t total_recv;
1232         uint64_t total_xmit;
1233         uint64_t total_rx_dropped;
1234         uint64_t total_tx_dropped;
1235         uint64_t total_rx_nombuf;
1236         uint64_t tx_dropped;
1237         uint64_t rx_bad_ip_csum;
1238         uint64_t rx_bad_l4_csum;
1239 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1240         uint64_t fwd_cycles;
1241 #endif
1242
1243         static const char *acc_stats_border = "+++++++++++++++";
1244
1245         if (test_done) {
1246                 printf("Packet forwarding not started\n");
1247                 return;
1248         }
1249         printf("Telling cores to stop...");
1250         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1251                 fwd_lcores[lc_id]->stopped = 1;
1252         printf("\nWaiting for lcores to finish...\n");
1253         rte_eal_mp_wait_lcore();
1254         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1255         if (port_fwd_end != NULL) {
1256                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1257                         pt_id = fwd_ports_ids[i];
1258                         (*port_fwd_end)(pt_id);
1259                 }
1260         }
1261 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1262         fwd_cycles = 0;
1263 #endif
1264         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1265                 if (cur_fwd_config.nb_fwd_streams >
1266                     cur_fwd_config.nb_fwd_ports) {
1267                         fwd_stream_stats_display(sm_id);
1268                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1269                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1270                 } else {
1271                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1272                                 fwd_streams[sm_id];
1273                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1274                                 fwd_streams[sm_id];
1275                 }
1276                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1277                 tx_dropped = (uint64_t) (tx_dropped +
1278                                          fwd_streams[sm_id]->fwd_dropped);
1279                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1280
1281                 rx_bad_ip_csum =
1282                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1283                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1284                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1285                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1286                                                         rx_bad_ip_csum;
1287
1288                 rx_bad_l4_csum =
1289                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1290                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1291                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1292                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1293                                                         rx_bad_l4_csum;
1294
1295 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1296                 fwd_cycles = (uint64_t) (fwd_cycles +
1297                                          fwd_streams[sm_id]->core_cycles);
1298 #endif
1299         }
1300         total_recv = 0;
1301         total_xmit = 0;
1302         total_rx_dropped = 0;
1303         total_tx_dropped = 0;
1304         total_rx_nombuf  = 0;
1305         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1306                 pt_id = fwd_ports_ids[i];
1307
1308                 port = &ports[pt_id];
1309                 rte_eth_stats_get(pt_id, &stats);
1310                 stats.ipackets -= port->stats.ipackets;
1311                 port->stats.ipackets = 0;
1312                 stats.opackets -= port->stats.opackets;
1313                 port->stats.opackets = 0;
1314                 stats.ibytes   -= port->stats.ibytes;
1315                 port->stats.ibytes = 0;
1316                 stats.obytes   -= port->stats.obytes;
1317                 port->stats.obytes = 0;
1318                 stats.imissed  -= port->stats.imissed;
1319                 port->stats.imissed = 0;
1320                 stats.oerrors  -= port->stats.oerrors;
1321                 port->stats.oerrors = 0;
1322                 stats.rx_nombuf -= port->stats.rx_nombuf;
1323                 port->stats.rx_nombuf = 0;
1324
1325                 total_recv += stats.ipackets;
1326                 total_xmit += stats.opackets;
1327                 total_rx_dropped += stats.imissed;
1328                 total_tx_dropped += port->tx_dropped;
1329                 total_rx_nombuf  += stats.rx_nombuf;
1330
1331                 fwd_port_stats_display(pt_id, &stats);
1332         }
1333
1334         printf("\n  %s Accumulated forward statistics for all ports"
1335                "%s\n",
1336                acc_stats_border, acc_stats_border);
1337         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1338                "%-"PRIu64"\n"
1339                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1340                "%-"PRIu64"\n",
1341                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1342                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1343         if (total_rx_nombuf > 0)
1344                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1345         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1346                "%s\n",
1347                acc_stats_border, acc_stats_border);
1348 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1349         if (total_recv > 0)
1350                 printf("\n  CPU cycles/packet=%u (total cycles="
1351                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1352                        (unsigned int)(fwd_cycles / total_recv),
1353                        fwd_cycles, total_recv);
1354 #endif
1355         printf("\nDone.\n");
1356         test_done = 1;
1357 }
1358
1359 void
1360 dev_set_link_up(portid_t pid)
1361 {
1362         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1363                 printf("\nSet link up fail.\n");
1364 }
1365
1366 void
1367 dev_set_link_down(portid_t pid)
1368 {
1369         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1370                 printf("\nSet link down fail.\n");
1371 }
1372
1373 static int
1374 all_ports_started(void)
1375 {
1376         portid_t pi;
1377         struct rte_port *port;
1378
1379         RTE_ETH_FOREACH_DEV(pi) {
1380                 port = &ports[pi];
1381                 /* Check if there is a port which is not started */
1382                 if ((port->port_status != RTE_PORT_STARTED) &&
1383                         (port->slave_flag == 0))
1384                         return 0;
1385         }
1386
1387         /* No port was found in a non-started state */
1388         return 1;
1389 }
1390
1391 int
1392 all_ports_stopped(void)
1393 {
1394         portid_t pi;
1395         struct rte_port *port;
1396
1397         RTE_ETH_FOREACH_DEV(pi) {
1398                 port = &ports[pi];
1399                 if ((port->port_status != RTE_PORT_STOPPED) &&
1400                         (port->slave_flag == 0))
1401                         return 0;
1402         }
1403
1404         return 1;
1405 }
1406
1407 int
1408 port_is_started(portid_t port_id)
1409 {
1410         if (port_id_is_invalid(port_id, ENABLED_WARN))
1411                 return 0;
1412
1413         if (ports[port_id].port_status != RTE_PORT_STARTED)
1414                 return 0;
1415
1416         return 1;
1417 }
1418
1419 static int
1420 port_is_closed(portid_t port_id)
1421 {
1422         if (port_id_is_invalid(port_id, ENABLED_WARN))
1423                 return 0;
1424
1425         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1426                 return 0;
1427
1428         return 1;
1429 }
1430
1431 int
1432 start_port(portid_t pid)
1433 {
1434         int diag, need_check_link_status = -1;
1435         portid_t pi;
1436         queueid_t qi;
1437         struct rte_port *port;
1438         struct ether_addr mac_addr;
1439         enum rte_eth_event_type event_type;
1440
1441         if (port_id_is_invalid(pid, ENABLED_WARN))
1442                 return 0;
1443
1444         if(dcb_config)
1445                 dcb_test = 1;
1446         RTE_ETH_FOREACH_DEV(pi) {
1447                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1448                         continue;
1449
1450                 need_check_link_status = 0;
1451                 port = &ports[pi];
1452                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1453                                                  RTE_PORT_HANDLING) == 0) {
1454                         printf("Port %d is not stopped\n", pi);
1455                         continue;
1456                 }
1457
1458                 if (port->need_reconfig > 0) {
1459                         port->need_reconfig = 0;
1460
1461                         if (flow_isolate_all) {
1462                                 int ret = port_flow_isolate(pi, 1);
1463                                 if (ret) {
1464                                         printf("Failed to apply isolated"
1465                                                " mode on port %d\n", pi);
1466                                         return -1;
1467                                 }
1468                         }
1469
1470                         printf("Configuring Port %d (socket %u)\n", pi,
1471                                         port->socket_id);
1472                         /* configure port */
1473                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1474                                                 &(port->dev_conf));
1475                         if (diag != 0) {
1476                                 if (rte_atomic16_cmpset(&(port->port_status),
1477                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1478                                         printf("Port %d can not be set back "
1479                                                         "to stopped\n", pi);
1480                                 printf("Failed to configure port %d\n", pi);
1481                                 /* try to reconfigure port next time */
1482                                 port->need_reconfig = 1;
1483                                 return -1;
1484                         }
1485                 }
1486                 if (port->need_reconfig_queues > 0) {
1487                         port->need_reconfig_queues = 0;
1488                         /* setup tx queues */
1489                         for (qi = 0; qi < nb_txq; qi++) {
1490                                 if ((numa_support) &&
1491                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1492                                         diag = rte_eth_tx_queue_setup(pi, qi,
1493                                                 nb_txd,txring_numa[pi],
1494                                                 &(port->tx_conf));
1495                                 else
1496                                         diag = rte_eth_tx_queue_setup(pi, qi,
1497                                                 nb_txd,port->socket_id,
1498                                                 &(port->tx_conf));
1499
1500                                 if (diag == 0)
1501                                         continue;
1502
1503                                 /* Failed to set up a TX queue; bail out */
1504                                 if (rte_atomic16_cmpset(&(port->port_status),
1505                                                         RTE_PORT_HANDLING,
1506                                                         RTE_PORT_STOPPED) == 0)
1507                                         printf("Port %d can not be set back "
1508                                                         "to stopped\n", pi);
1509                                 printf("Failed to configure port %d tx queues\n", pi);
1510                                 /* try to reconfigure queues next time */
1511                                 port->need_reconfig_queues = 1;
1512                                 return -1;
1513                         }
1514                         /* setup rx queues */
1515                         for (qi = 0; qi < nb_rxq; qi++) {
1516                                 if ((numa_support) &&
1517                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1518                                         struct rte_mempool * mp =
1519                                                 mbuf_pool_find(rxring_numa[pi]);
1520                                         if (mp == NULL) {
1521                                                 printf("Failed to setup RX queue: "
1522                                                         "no mempool allocated "
1523                                                         "on socket %d\n",
1524                                                         rxring_numa[pi]);
1525                                                 return -1;
1526                                         }
1527
1528                                         diag = rte_eth_rx_queue_setup(pi, qi,
1529                                              nb_rxd,rxring_numa[pi],
1530                                              &(port->rx_conf),mp);
1531                                 } else {
1532                                         struct rte_mempool *mp =
1533                                                 mbuf_pool_find(port->socket_id);
1534                                         if (mp == NULL) {
1535                                                 printf("Failed to setup RX queue: "
1536                                                         "no mempool allocated "
1537                                                         "on socket %d\n",
1538                                                         port->socket_id);
1539                                                 return -1;
1540                                         }
1541                                         diag = rte_eth_rx_queue_setup(pi, qi,
1542                                              nb_rxd,port->socket_id,
1543                                              &(port->rx_conf), mp);
1544                                 }
1545                                 if (diag == 0)
1546                                         continue;
1547
1548                                 /* Failed to set up an RX queue; bail out */
1549                                 if (rte_atomic16_cmpset(&(port->port_status),
1550                                                         RTE_PORT_HANDLING,
1551                                                         RTE_PORT_STOPPED) == 0)
1552                                         printf("Port %d can not be set back "
1553                                                         "to stopped\n", pi);
1554                                 printf("Failed to configure port %d rx queues\n", pi);
1555                                 /* try to reconfigure queues next time */
1556                                 port->need_reconfig_queues = 1;
1557                                 return -1;
1558                         }
1559                 }
1560
1561                 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1562                      event_type < RTE_ETH_EVENT_MAX;
1563                      event_type++) {
1564                         diag = rte_eth_dev_callback_register(pi,
1565                                                         event_type,
1566                                                         eth_event_callback,
1567                                                         NULL);
1568                         if (diag) {
1569                                 printf("Failed to register event callback for event %d\n",
1570                                         event_type);
1571                                 return -1;
1572                         }
1573                 }
1574
1575                 /* start port */
1576                 if (rte_eth_dev_start(pi) < 0) {
1577                         printf("Failed to start port %d\n", pi);
1578
1579                         /* Failed to start the port; roll the status back to stopped */
1580                         if (rte_atomic16_cmpset(&(port->port_status),
1581                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1582                                 printf("Port %d can not be set back to "
1583                                                         "stopped\n", pi);
1584                         continue;
1585                 }
1586
1587                 if (rte_atomic16_cmpset(&(port->port_status),
1588                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1589                         printf("Port %d cannot be set to started\n", pi);
1590
1591                 rte_eth_macaddr_get(pi, &mac_addr);
1592                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1593                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1594                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1595                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1596
1597                 /* At least one port was started; the link status must be checked */
1598                 need_check_link_status = 1;
1599         }
1600
1601         if (need_check_link_status == 1 && !no_link_check)
1602                 check_all_ports_link_status(RTE_PORT_ALL);
1603         else if (need_check_link_status == 0)
1604                 printf("Please stop the ports first\n");
1605
1606         printf("Done\n");
1607         return 0;
1608 }
1609
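/*
 * Stop the given port (or all ports when pid is RTE_PORT_ALL). Ports that
 * are still part of the forwarding configuration or that act as bonding
 * slaves are skipped with a message; the others are moved from
 * RTE_PORT_STARTED to RTE_PORT_STOPPED around rte_eth_dev_stop().
 */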
1610 void
1611 stop_port(portid_t pid)
1612 {
1613         portid_t pi;
1614         struct rte_port *port;
1615         int need_check_link_status = 0;
1616
1617         if (dcb_test) {
1618                 dcb_test = 0;
1619                 dcb_config = 0;
1620         }
1621
1622         if (port_id_is_invalid(pid, ENABLED_WARN))
1623                 return;
1624
1625         printf("Stopping ports...\n");
1626
1627         RTE_ETH_FOREACH_DEV(pi) {
1628                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1629                         continue;
1630
1631                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1632                         printf("Please remove port %d from forwarding configuration.\n", pi);
1633                         continue;
1634                 }
1635
1636                 if (port_is_bonding_slave(pi)) {
1637                         printf("Please remove port %d from bonded device.\n", pi);
1638                         continue;
1639                 }
1640
1641                 port = &ports[pi];
1642                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1643                                                 RTE_PORT_HANDLING) == 0)
1644                         continue;
1645
1646                 rte_eth_dev_stop(pi);
1647
1648                 if (rte_atomic16_cmpset(&(port->port_status),
1649                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1650                         printf("Port %d cannot be set to stopped\n", pi);
1651                 need_check_link_status = 1;
1652         }
1653         if (need_check_link_status && !no_link_check)
1654                 check_all_ports_link_status(RTE_PORT_ALL);
1655
1656         printf("Done\n");
1657 }
1658
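/*
 * Close the given port (or all ports when pid is RTE_PORT_ALL): flush its
 * flow rules if any, call rte_eth_dev_close() and mark the port
 * RTE_PORT_CLOSED. Only ports that are already stopped are closed.
 */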
1659 void
1660 close_port(portid_t pid)
1661 {
1662         portid_t pi;
1663         struct rte_port *port;
1664
1665         if (port_id_is_invalid(pid, ENABLED_WARN))
1666                 return;
1667
1668         printf("Closing ports...\n");
1669
1670         RTE_ETH_FOREACH_DEV(pi) {
1671                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1672                         continue;
1673
1674                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1675                         printf("Please remove port %d from forwarding configuration.\n", pi);
1676                         continue;
1677                 }
1678
1679                 if (port_is_bonding_slave(pi)) {
1680                         printf("Please remove port %d from bonded device.\n", pi);
1681                         continue;
1682                 }
1683
1684                 port = &ports[pi];
1685                 if (rte_atomic16_cmpset(&(port->port_status),
1686                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1687                         printf("Port %d is already closed\n", pi);
1688                         continue;
1689                 }
1690
1691                 if (rte_atomic16_cmpset(&(port->port_status),
1692                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1693                         printf("Port %d is not stopped\n", pi);
1694                         continue;
1695                 }
1696
1697                 if (port->flow_list)
1698                         port_flow_flush(pi);
1699                 rte_eth_dev_close(pi);
1700
1701                 if (rte_atomic16_cmpset(&(port->port_status),
1702                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1703                         printf("Port %d cannot be set to closed\n", pi);
1704         }
1705
1706         printf("Done\n");
1707 }
1708
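/*
 * Reset the given port (or all ports when pid is RTE_PORT_ALL) through
 * rte_eth_dev_reset() and flag it for reconfiguration, so the next
 * start_port() call reapplies the port and queue configuration.
 */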
1709 void
1710 reset_port(portid_t pid)
1711 {
1712         int diag;
1713         portid_t pi;
1714         struct rte_port *port;
1715
1716         if (port_id_is_invalid(pid, ENABLED_WARN))
1717                 return;
1718
1719         printf("Resetting ports...\n");
1720
1721         RTE_ETH_FOREACH_DEV(pi) {
1722                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1723                         continue;
1724
1725                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1726                         printf("Please remove port %d from forwarding "
1727                                "configuration.\n", pi);
1728                         continue;
1729                 }
1730
1731                 if (port_is_bonding_slave(pi)) {
1732                         printf("Please remove port %d from bonded device.\n",
1733                                pi);
1734                         continue;
1735                 }
1736
1737                 diag = rte_eth_dev_reset(pi);
1738                 if (diag == 0) {
1739                         port = &ports[pi];
1740                         port->need_reconfig = 1;
1741                         port->need_reconfig_queues = 1;
1742                 } else {
1743                         printf("Failed to reset port %d. diag=%d\n", pi, diag);
1744                 }
1745         }
1746
1747         printf("Done\n");
1748 }
1749
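/*
 * Hot-plug a device that was not probed at startup and register it as a
 * new stopped port. The identifier is whatever rte_eth_dev_attach()
 * accepts, typically a PCI address or a virtual device string; for
 * instance "0000:02:00.0" or "net_pcap0,iface=eth0" (illustrative values,
 * not taken from this file).
 */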
1750 void
1751 attach_port(char *identifier)
1752 {
1753         portid_t pi = 0;
1754         unsigned int socket_id;
1755
1756         printf("Attaching a new port...\n");
1757
1758         if (identifier == NULL) {
1759                 printf("Invalid parameters are specified\n");
1760                 return;
1761         }
1762
1763         if (rte_eth_dev_attach(identifier, &pi))
1764                 return;
1765
1766         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1767         /* if socket_id is invalid, set to 0 */
1768         if (check_socket_id(socket_id) < 0)
1769                 socket_id = 0;
1770         reconfig(pi, socket_id);
1771         rte_eth_promiscuous_enable(pi);
1772
1773         nb_ports = rte_eth_dev_count();
1774
1775         ports[pi].port_status = RTE_PORT_STOPPED;
1776
1777         printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1778         printf("Done\n");
1779 }
1780
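/*
 * Detach a previously closed port from the application: its flow rules
 * are flushed and rte_eth_dev_detach() removes the underlying device.
 * The port must have been closed with close_port() first.
 */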
1781 void
1782 detach_port(uint8_t port_id)
1783 {
1784         char name[RTE_ETH_NAME_MAX_LEN];
1785
1786         printf("Detaching a port...\n");
1787
1788         if (!port_is_closed(port_id)) {
1789                 printf("Please close port first\n");
1790                 return;
1791         }
1792
1793         if (ports[port_id].flow_list)
1794                 port_flow_flush(port_id);
1795
1796         if (rte_eth_dev_detach(port_id, name)) {
1797                 RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1798                 return;
1799         }
1800
1801         nb_ports = rte_eth_dev_count();
1802
1803         printf("Port '%s' is detached. Total number of ports is now %d\n",
1804                         name, nb_ports);
1805         printf("Done\n");
1806         return;
1807 }
1808
1809 void
1810 pmd_test_exit(void)
1811 {
1812         portid_t pt_id;
1813
1814         if (test_done == 0)
1815                 stop_packet_forwarding();
1816
1817         if (ports != NULL) {
1818                 no_link_check = 1;
1819                 RTE_ETH_FOREACH_DEV(pt_id) {
1820                         printf("\nShutting down port %d...\n", pt_id);
1821                         fflush(stdout);
1822                         stop_port(pt_id);
1823                         close_port(pt_id);
1824                 }
1825         }
1826         printf("\nBye...\n");
1827 }
1828
1829 typedef void (*cmd_func_t)(void);
1830 struct pmd_test_command {
1831         const char *cmd_name;
1832         cmd_func_t cmd_func;
1833 };
1834
1835 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1836
1837 /* Check the link status of all ports for up to 9 s and print the final result */
1838 static void
1839 check_all_ports_link_status(uint32_t port_mask)
1840 {
1841 #define CHECK_INTERVAL 100 /* 100ms */
1842 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1843         portid_t portid;
1844         uint8_t count, all_ports_up, print_flag = 0;
1845         struct rte_eth_link link;
1846
1847         printf("Checking link statuses...\n");
1848         fflush(stdout);
1849         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1850                 all_ports_up = 1;
1851                 RTE_ETH_FOREACH_DEV(portid) {
1852                         if ((port_mask & (1 << portid)) == 0)
1853                                 continue;
1854                         memset(&link, 0, sizeof(link));
1855                         rte_eth_link_get_nowait(portid, &link);
1856                         /* print link status if flag set */
1857                         if (print_flag == 1) {
1858                                 if (link.link_status)
1859                                         printf(
1860                                         "Port %d Link Up. Speed %u Mbps - %s\n",
1861                                         portid, link.link_speed,
1862                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1863                                         ("full-duplex") : ("half-duplex"));
1864                                 else
1865                                         printf("Port %d Link Down\n", portid);
1866                                 continue;
1867                         }
1868                         /* clear all_ports_up flag if any link down */
1869                         if (link.link_status == ETH_LINK_DOWN) {
1870                                 all_ports_up = 0;
1871                                 break;
1872                         }
1873                 }
1874                 /* after finally printing all link status, get out */
1875                 if (print_flag == 1)
1876                         break;
1877
1878                 if (all_ports_up == 0) {
1879                         fflush(stdout);
1880                         rte_delay_ms(CHECK_INTERVAL);
1881                 }
1882
1883                 /* set the print_flag if all ports up or timeout */
1884                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1885                         print_flag = 1;
1886                 }
1887
1888                 if (lsc_interrupt)
1889                         break;
1890         }
1891 }
1892
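/*
 * Deferred handler for RTE_ETH_EVENT_INTR_RMV, scheduled by
 * eth_event_callback() below through rte_eal_alarm_set(): stop and close
 * the removed port, then detach its device from the EAL.
 */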
1893 static void
1894 rmv_event_callback(void *arg)
1895 {
1896         struct rte_eth_dev *dev;
1897         uint8_t port_id = (intptr_t)arg;
1898
1899         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1900         dev = &rte_eth_devices[port_id];
1901
1902         stop_port(port_id);
1903         close_port(port_id);
1904         printf("removing device %s\n", dev->device->name);
1905         if (rte_eal_dev_detach(dev->device))
1906                 RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
1907                         dev->device->name);
1908 }
1909
1910 /* This function is used by the interrupt thread */
1911 static int
1912 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1913                   void *ret_param)
1914 {
1915         static const char * const event_desc[] = {
1916                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1917                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1918                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1919                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1920                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1921                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1922                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1923                 [RTE_ETH_EVENT_MAX] = NULL,
1924         };
1925
1926         RTE_SET_USED(param);
1927         RTE_SET_USED(ret_param);
1928
1929         if (type >= RTE_ETH_EVENT_MAX) {
1930                 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1931                         port_id, __func__, type);
1932                 fflush(stderr);
1933         } else if (event_print_mask & (UINT32_C(1) << type)) {
1934                 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1935                         event_desc[type]);
1936                 fflush(stdout);
1937         }
1938
1939         switch (type) {
1940         case RTE_ETH_EVENT_INTR_RMV:
1941                 if (rte_eal_alarm_set(100000,
1942                                 rmv_event_callback, (void *)(intptr_t)port_id))
1943                         fprintf(stderr, "Could not set up deferred device removal\n");
1944                 break;
1945         default:
1946                 break;
1947         }
1948         return 0;
1949 }
1950
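/*
 * Apply the TX queue to stats-counter mappings supplied on the command
 * line (tx_queue_stats_mappings[]) to the given port. Returns the PMD
 * error code on failure, 0 otherwise; the RX variant below behaves the
 * same way for rx_queue_stats_mappings[].
 */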
1951 static int
1952 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1953 {
1954         uint16_t i;
1955         int diag;
1956         uint8_t mapping_found = 0;
1957
1958         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1959                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1960                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1961                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1962                                         tx_queue_stats_mappings[i].queue_id,
1963                                         tx_queue_stats_mappings[i].stats_counter_id);
1964                         if (diag != 0)
1965                                 return diag;
1966                         mapping_found = 1;
1967                 }
1968         }
1969         if (mapping_found)
1970                 port->tx_queue_stats_mapping_enabled = 1;
1971         return 0;
1972 }
1973
1974 static int
1975 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1976 {
1977         uint16_t i;
1978         int diag;
1979         uint8_t mapping_found = 0;
1980
1981         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1982                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1983                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1984                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1985                                         rx_queue_stats_mappings[i].queue_id,
1986                                         rx_queue_stats_mappings[i].stats_counter_id);
1987                         if (diag != 0)
1988                                 return diag;
1989                         mapping_found = 1;
1990                 }
1991         }
1992         if (mapping_found)
1993                 port->rx_queue_stats_mapping_enabled = 1;
1994         return 0;
1995 }
1996
1997 static void
1998 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1999 {
2000         int diag = 0;
2001
2002         diag = set_tx_queue_stats_mapping_registers(pi, port);
2003         if (diag != 0) {
2004                 if (diag == -ENOTSUP) {
2005                         port->tx_queue_stats_mapping_enabled = 0;
2006                         printf("TX queue stats mapping not supported on port id=%d\n", pi);
2007                 }
2008                 else
2009                         rte_exit(EXIT_FAILURE,
2010                                         "set_tx_queue_stats_mapping_registers "
2011                                         "failed for port id=%d diag=%d\n",
2012                                         pi, diag);
2013         }
2014
2015         diag = set_rx_queue_stats_mapping_registers(pi, port);
2016         if (diag != 0) {
2017                 if (diag == -ENOTSUP) {
2018                         port->rx_queue_stats_mapping_enabled = 0;
2019                         printf("RX queue stats mapping not supported on port id=%d\n", pi);
2020                 }
2021                 else
2022                         rte_exit(EXIT_FAILURE,
2023                                         "set_rx_queue_stats_mapping_registers "
2024                                         "failed for port id=%d diag=%d\n",
2025                                         pi, diag);
2026         }
2027 }
2028
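/*
 * Initialize the per-port RX/TX queue configuration from the PMD defaults
 * reported in dev_info, then override only the thresholds and flags that
 * were explicitly given on the command line (RTE_PMD_PARAM_UNSET means
 * "keep the driver default").
 */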
2029 static void
2030 rxtx_port_config(struct rte_port *port)
2031 {
2032         port->rx_conf = port->dev_info.default_rxconf;
2033         port->tx_conf = port->dev_info.default_txconf;
2034
2035         /* Check if any RX/TX parameters have been passed */
2036         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2037                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2038
2039         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2040                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2041
2042         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2043                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2044
2045         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2046                 port->rx_conf.rx_free_thresh = rx_free_thresh;
2047
2048         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2049                 port->rx_conf.rx_drop_en = rx_drop_en;
2050
2051         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2052                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2053
2054         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2055                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2056
2057         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2058                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2059
2060         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2061                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2062
2063         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2064                 port->tx_conf.tx_free_thresh = tx_free_thresh;
2065
2066         if (txq_flags != RTE_PMD_PARAM_UNSET)
2067                 port->tx_conf.txq_flags = txq_flags;
2068 }
2069
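/*
 * Build the default configuration of every probed port: RX mode, flow
 * director settings, RSS (only when more than one RX queue is used),
 * queue thresholds and, when the PMD advertises support for them, the
 * LSC and RMV interrupts.
 */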
2070 void
2071 init_port_config(void)
2072 {
2073         portid_t pid;
2074         struct rte_port *port;
2075
2076         RTE_ETH_FOREACH_DEV(pid) {
2077                 port = &ports[pid];
2078                 port->dev_conf.rxmode = rx_mode;
2079                 port->dev_conf.fdir_conf = fdir_conf;
2080                 if (nb_rxq > 1) {
2081                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2082                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2083                 } else {
2084                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2085                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2086                 }
2087
2088                 if (port->dcb_flag == 0) {
2089                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2090                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2091                         else
2092                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2093                 }
2094
2095                 rxtx_port_config(port);
2096
2097                 rte_eth_macaddr_get(pid, &port->eth_addr);
2098
2099                 map_port_queue_stats_mapping_registers(pid, port);
2100 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2101                 rte_pmd_ixgbe_bypass_init(pid);
2102 #endif
2103
2104                 if (lsc_interrupt &&
2105                     (rte_eth_devices[pid].data->dev_flags &
2106                      RTE_ETH_DEV_INTR_LSC))
2107                         port->dev_conf.intr_conf.lsc = 1;
2108                 if (rmv_interrupt &&
2109                     (rte_eth_devices[pid].data->dev_flags &
2110                      RTE_ETH_DEV_INTR_RMV))
2111                         port->dev_conf.intr_conf.rmv = 1;
2112         }
2113 }
2114
2115 void set_port_slave_flag(portid_t slave_pid)
2116 {
2117         struct rte_port *port;
2118
2119         port = &ports[slave_pid];
2120         port->slave_flag = 1;
2121 }
2122
2123 void clear_port_slave_flag(portid_t slave_pid)
2124 {
2125         struct rte_port *port;
2126
2127         port = &ports[slave_pid];
2128         port->slave_flag = 0;
2129 }
2130
2131 uint8_t port_is_bonding_slave(portid_t slave_pid)
2132 {
2133         struct rte_port *port;
2134
2135         port = &ports[slave_pid];
2136         return port->slave_flag;
2137 }
2138
2139 const uint16_t vlan_tags[] = {
2140                 0,  1,  2,  3,  4,  5,  6,  7,
2141                 8,  9, 10, 11,  12, 13, 14, 15,
2142                 16, 17, 18, 19, 20, 21, 22, 23,
2143                 24, 25, 26, 27, 28, 29, 30, 31
2144 };
2145
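/*
 * Fill an rte_eth_conf for DCB testing. With DCB_VT_ENABLED the VMDq+DCB
 * RX/TX paths are configured using the vlan_tags[] array above; otherwise
 * plain DCB is combined with RSS. PFC is advertised only when pfc_en is
 * set. A hedged usage sketch, with the number of TCs and pfc_en given as
 * placeholder values:
 *
 *     struct rte_eth_conf conf;
 *     memset(&conf, 0, sizeof(conf));
 *     get_eth_dcb_conf(&conf, DCB_ENABLED, ETH_4_TCS, 1);
 */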
2146 static int
2147 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2148                  enum dcb_mode_enable dcb_mode,
2149                  enum rte_eth_nb_tcs num_tcs,
2150                  uint8_t pfc_en)
2151 {
2152         uint8_t i;
2153
2154         /*
2155          * Builds up the correct configuration for dcb+vt based on the vlan tags array
2156          * given above, and the number of traffic classes available for use.
2157          */
2158         if (dcb_mode == DCB_VT_ENABLED) {
2159                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2160                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2161                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2162                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2163
2164                 /* VMDQ+DCB RX and TX configurations */
2165                 vmdq_rx_conf->enable_default_pool = 0;
2166                 vmdq_rx_conf->default_pool = 0;
2167                 vmdq_rx_conf->nb_queue_pools =
2168                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2169                 vmdq_tx_conf->nb_queue_pools =
2170                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2171
2172                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2173                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2174                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2175                         vmdq_rx_conf->pool_map[i].pools =
2176                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
2177                 }
2178                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2179                         vmdq_rx_conf->dcb_tc[i] = i;
2180                         vmdq_tx_conf->dcb_tc[i] = i;
2181                 }
2182
2183                 /* set DCB mode of RX and TX of multiple queues */
2184                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2185                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2186         } else {
2187                 struct rte_eth_dcb_rx_conf *rx_conf =
2188                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
2189                 struct rte_eth_dcb_tx_conf *tx_conf =
2190                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
2191
2192                 rx_conf->nb_tcs = num_tcs;
2193                 tx_conf->nb_tcs = num_tcs;
2194
2195                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2196                         rx_conf->dcb_tc[i] = i % num_tcs;
2197                         tx_conf->dcb_tc[i] = i % num_tcs;
2198                 }
2199                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2200                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2201                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2202         }
2203
2204         if (pfc_en)
2205                 eth_conf->dcb_capability_en =
2206                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2207         else
2208                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2209
2210         return 0;
2211 }
2212
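/*
 * Switch a port to DCB mode: build the DCB configuration with
 * get_eth_dcb_conf(), reconfigure the device with 0 queues so the queue
 * setup is redone later, adjust nb_rxq/nb_txq to the capabilities
 * reported by the PMD and enable VLAN filtering for the vlan_tags[]
 * entries. This is normally driven from the testpmd command line; the
 * exact "port config ... dcb ..." syntax lives in cmdline.c.
 */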
2213 int
2214 init_port_dcb_config(portid_t pid,
2215                      enum dcb_mode_enable dcb_mode,
2216                      enum rte_eth_nb_tcs num_tcs,
2217                      uint8_t pfc_en)
2218 {
2219         struct rte_eth_conf port_conf;
2220         struct rte_port *rte_port;
2221         int retval;
2222         uint16_t i;
2223
2224         rte_port = &ports[pid];
2225
2226         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2227         /* Enter DCB configuration status */
2228         dcb_config = 1;
2229
2230         /* Set the DCB configuration for both VT and non-VT mode */
2231         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2232         if (retval < 0)
2233                 return retval;
2234         port_conf.rxmode.hw_vlan_filter = 1;
2235
2236         /**
2237          * Write the configuration into the device.
2238          * Set the numbers of RX & TX queues to 0, so
2239          * the RX & TX queues will not be setup.
2240          */
2241         rte_eth_dev_configure(pid, 0, 0, &port_conf);
2242
2243         rte_eth_dev_info_get(pid, &rte_port->dev_info);
2244
2245         /* If dev_info.vmdq_pool_base is greater than 0,
2246          * the queue IDs of the VMDq pools start after the PF queues.
2247          */
2248         if (dcb_mode == DCB_VT_ENABLED &&
2249             rte_port->dev_info.vmdq_pool_base > 0) {
2250                 printf("VMDQ_DCB multi-queue mode is nonsensical"
2251                         " for port %d.\n", pid);
2252                 return -1;
2253         }
2254
2255         /* Assume all ports in testpmd have the same DCB capability
2256          * and the same number of RX and TX queues in DCB mode.
2257          */
2258         if (dcb_mode == DCB_VT_ENABLED) {
2259                 if (rte_port->dev_info.max_vfs > 0) {
2260                         nb_rxq = rte_port->dev_info.nb_rx_queues;
2261                         nb_txq = rte_port->dev_info.nb_tx_queues;
2262                 } else {
2263                         nb_rxq = rte_port->dev_info.max_rx_queues;
2264                         nb_txq = rte_port->dev_info.max_tx_queues;
2265                 }
2266         } else {
2267                 /* If VT is disabled, use all PF queues */
2268                 if (rte_port->dev_info.vmdq_pool_base == 0) {
2269                         nb_rxq = rte_port->dev_info.max_rx_queues;
2270                         nb_txq = rte_port->dev_info.max_tx_queues;
2271                 } else {
2272                         nb_rxq = (queueid_t)num_tcs;
2273                         nb_txq = (queueid_t)num_tcs;
2274
2275                 }
2276         }
2277         rx_free_thresh = 64;
2278
2279         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2280
2281         rxtx_port_config(rte_port);
2282         /* VLAN filter */
2283         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2284         for (i = 0; i < RTE_DIM(vlan_tags); i++)
2285                 rx_vft_set(pid, vlan_tags[i], 1);
2286
2287         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2288         map_port_queue_stats_mapping_registers(pid, rte_port);
2289
2290         rte_port->dcb_flag = 1;
2291
2292         return 0;
2293 }
2294
2295 static void
2296 init_port(void)
2297 {
2298         /* Configuration of Ethernet ports. */
2299         ports = rte_zmalloc("testpmd: ports",
2300                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2301                             RTE_CACHE_LINE_SIZE);
2302         if (ports == NULL) {
2303                 rte_exit(EXIT_FAILURE,
2304                                 "rte_zmalloc(%d struct rte_port) failed\n",
2305                                 RTE_MAX_ETHPORTS);
2306         }
2307 }
2308
2309 static void
2310 force_quit(void)
2311 {
2312         pmd_test_exit();
2313         prompt_exit();
2314 }
2315
2316 static void
2317 print_stats(void)
2318 {
2319         uint8_t i;
2320         const char clr[] = { 27, '[', '2', 'J', '\0' };
2321         const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2322
2323         /* Clear screen and move to top left */
2324         printf("%s%s", clr, top_left);
2325
2326         printf("\nPort statistics ====================================");
2327         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2328                 nic_stats_display(fwd_ports_ids[i]);
2329 }
2330
2331 static void
2332 signal_handler(int signum)
2333 {
2334         if (signum == SIGINT || signum == SIGTERM) {
2335                 printf("\nSignal %d received, preparing to exit...\n",
2336                                 signum);
2337 #ifdef RTE_LIBRTE_PDUMP
2338                 /* uninitialize packet capture framework */
2339                 rte_pdump_uninit();
2340 #endif
2341 #ifdef RTE_LIBRTE_LATENCY_STATS
2342                 rte_latencystats_uninit();
2343 #endif
2344                 force_quit();
2345                 /* Set flag to indicate the force termination. */
2346                 f_quit = 1;
2347                 /* exit with the expected status */
2348                 signal(signum, SIG_DFL);
2349                 kill(getpid(), signum);
2350         }
2351 }
2352
2353 int
2354 main(int argc, char** argv)
2355 {
2356         int  diag;
2357         portid_t port_id;
2358
2359         signal(SIGINT, signal_handler);
2360         signal(SIGTERM, signal_handler);
2361
2362         diag = rte_eal_init(argc, argv);
2363         if (diag < 0)
2364                 rte_panic("Cannot init EAL\n");
2365
2366         if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2367                 RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
2368                         strerror(errno));
2369         }
2370
2371 #ifdef RTE_LIBRTE_PDUMP
2372         /* initialize packet capture framework */
2373         rte_pdump_init(NULL);
2374 #endif
2375
2376         nb_ports = (portid_t) rte_eth_dev_count();
2377         if (nb_ports == 0)
2378                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2379
2380         /* allocate port structures, and init them */
2381         init_port();
2382
2383         set_def_fwd_config();
2384         if (nb_lcores == 0)
2385                 rte_panic("Empty set of forwarding logical cores - check the "
2386                           "core mask supplied in the command parameters\n");
2387
2388         /* Bitrate/latency stats disabled by default */
2389 #ifdef RTE_LIBRTE_BITRATE
2390         bitrate_enabled = 0;
2391 #endif
2392 #ifdef RTE_LIBRTE_LATENCY_STATS
2393         latencystats_enabled = 0;
2394 #endif
2395
2396         argc -= diag;
2397         argv += diag;
2398         if (argc > 1)
2399                 launch_args_parse(argc, argv);
2400
2401         if (tx_first && interactive)
2402                 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2403                                 "interactive mode.\n");
2404
2405         if (tx_first && lsc_interrupt) {
2406                 printf("Warning: lsc_interrupt needs to be off when "
2407                                 "using tx_first. Disabling.\n");
2408                 lsc_interrupt = 0;
2409         }
2410
2411         if (!nb_rxq && !nb_txq)
2412                 printf("Warning: Either rx or tx queues should be non-zero\n");
2413
2414         if (nb_rxq > 1 && nb_rxq > nb_txq)
2415                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2416                        "but nb_txq=%d will prevent it from being fully tested.\n",
2417                        nb_rxq, nb_txq);
2418
2419         init_config();
2420         if (start_port(RTE_PORT_ALL) != 0)
2421                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2422
2423         /* set all ports to promiscuous mode by default */
2424         RTE_ETH_FOREACH_DEV(port_id)
2425                 rte_eth_promiscuous_enable(port_id);
2426
2427         /* Init metrics library */
2428         rte_metrics_init(rte_socket_id());
2429
2430 #ifdef RTE_LIBRTE_LATENCY_STATS
2431         if (latencystats_enabled != 0) {
2432                 int ret = rte_latencystats_init(1, NULL);
2433                 if (ret)
2434                         printf("Warning: latencystats init()"
2435                                 " returned error %d\n", ret);
2436                 printf("Latencystats running on lcore %d\n",
2437                         latencystats_lcore_id);
2438         }
2439 #endif
2440
2441         /* Setup bitrate stats */
2442 #ifdef RTE_LIBRTE_BITRATE
2443         if (bitrate_enabled != 0) {
2444                 bitrate_data = rte_stats_bitrate_create();
2445                 if (bitrate_data == NULL)
2446                         rte_exit(EXIT_FAILURE,
2447                                 "Could not allocate bitrate data.\n");
2448                 rte_stats_bitrate_reg(bitrate_data);
2449         }
2450 #endif
2451
2452 #ifdef RTE_LIBRTE_CMDLINE
2453         if (strlen(cmdline_filename) != 0)
2454                 cmdline_read_from_file(cmdline_filename);
2455
2456         if (interactive == 1) {
2457                 if (auto_start) {
2458                         printf("Start automatic packet forwarding\n");
2459                         start_packet_forwarding(0);
2460                 }
2461                 prompt();
2462                 pmd_test_exit();
2463         } else
2464 #endif
2465         {
2466                 char c;
2467                 int rc;
2468
2469                 f_quit = 0;
2470
2471                 printf("No commandline core given, start packet forwarding\n");
2472                 start_packet_forwarding(tx_first);
2473                 if (stats_period != 0) {
2474                         uint64_t prev_time = 0, cur_time, diff_time = 0;
2475                         uint64_t timer_period;
2476
2477                         /* Convert to number of cycles */
2478                         timer_period = stats_period * rte_get_timer_hz();
2479
2480                         while (f_quit == 0) {
2481                                 cur_time = rte_get_timer_cycles();
2482                                 diff_time += cur_time - prev_time;
2483
2484                                 if (diff_time >= timer_period) {
2485                                         print_stats();
2486                                         /* Reset the timer */
2487                                         diff_time = 0;
2488                                 }
2489                                 /* Sleep to avoid unnecessary checks */
2490                                 prev_time = cur_time;
2491                                 sleep(1);
2492                         }
2493                 }
2494
2495                 printf("Press enter to exit\n");
2496                 rc = read(0, &c, 1);
2497                 pmd_test_exit();
2498                 if (rc < 0)
2499                         return 1;
2500         }
2501
2502         return 0;
2503 }