app/testpmd: add Rx HW timestamp
[dpdk.git] / app/test-pmd/testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/mman.h>
42 #include <sys/types.h>
43 #include <errno.h>
44
45 #include <sys/queue.h>
46 #include <sys/stat.h>
47
48 #include <stdint.h>
49 #include <unistd.h>
50 #include <inttypes.h>
51
52 #include <rte_common.h>
53 #include <rte_errno.h>
54 #include <rte_byteorder.h>
55 #include <rte_log.h>
56 #include <rte_debug.h>
57 #include <rte_cycles.h>
58 #include <rte_memory.h>
59 #include <rte_memcpy.h>
60 #include <rte_memzone.h>
61 #include <rte_launch.h>
62 #include <rte_eal.h>
63 #include <rte_alarm.h>
64 #include <rte_per_lcore.h>
65 #include <rte_lcore.h>
66 #include <rte_atomic.h>
67 #include <rte_branch_prediction.h>
68 #include <rte_mempool.h>
69 #include <rte_malloc.h>
70 #include <rte_mbuf.h>
71 #include <rte_interrupts.h>
72 #include <rte_pci.h>
73 #include <rte_ether.h>
74 #include <rte_ethdev.h>
75 #include <rte_dev.h>
76 #include <rte_string_fns.h>
77 #ifdef RTE_LIBRTE_IXGBE_PMD
78 #include <rte_pmd_ixgbe.h>
79 #endif
80 #ifdef RTE_LIBRTE_PDUMP
81 #include <rte_pdump.h>
82 #endif
83 #include <rte_flow.h>
84 #include <rte_metrics.h>
85 #ifdef RTE_LIBRTE_BITRATE
86 #include <rte_bitrate.h>
87 #endif
88 #ifdef RTE_LIBRTE_LATENCY_STATS
89 #include <rte_latencystats.h>
90 #endif
91
92 #include "testpmd.h"
93
94 uint16_t verbose_level = 0; /**< Silent by default. */
95
96 /* use master core for command line? */
97 uint8_t interactive = 0;
98 uint8_t auto_start = 0;
99 uint8_t tx_first;
100 char cmdline_filename[PATH_MAX] = {0};
101
102 /*
103  * NUMA support configuration.
104  * When set, the NUMA support attempts to dispatch the allocation of the
105  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
106  * probed ports among the CPU sockets 0 and 1.
107  * Otherwise, all memory is allocated from CPU socket 0.
108  */
109 uint8_t numa_support = 1; /**< numa enabled by default */
110
111 /*
112  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
113  * not configured.
114  */
115 uint8_t socket_num = UMA_NO_CONFIG;
116
117 /*
118  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
119  */
120 uint8_t mp_anon = 0;
121
122 /*
123  * Record the Ethernet address of peer target ports to which packets are
124  * forwarded.
125  * Must be instantiated with the ethernet addresses of peer traffic generator
126  * ports.
127  */
128 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
129 portid_t nb_peer_eth_addrs = 0;
130
131 /*
132  * Probed Target Environment.
133  */
134 struct rte_port *ports;        /**< For all probed ethernet ports. */
135 portid_t nb_ports;             /**< Number of probed ethernet ports. */
136 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
137 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
138
139 /*
140  * Test Forwarding Configuration.
141  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
142  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
143  */
144 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
145 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
146 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
147 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
148
149 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
150 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
151
152 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
153 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
154
155 /*
156  * Forwarding engines.
157  */
158 struct fwd_engine * fwd_engines[] = {
159         &io_fwd_engine,
160         &mac_fwd_engine,
161         &mac_swap_engine,
162         &flow_gen_engine,
163         &rx_only_engine,
164         &tx_only_engine,
165         &csum_fwd_engine,
166         &icmp_echo_engine,
167 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
168         &softnic_tm_engine,
169         &softnic_tm_bypass_engine,
170 #endif
171 #ifdef RTE_LIBRTE_IEEE1588
172         &ieee1588_fwd_engine,
173 #endif
174         NULL,
175 };
176
177 struct fwd_config cur_fwd_config;
178 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
179 uint32_t retry_enabled;
180 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
181 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
182
183 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
184 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
185                                       * specified on command-line. */
186 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
187
188 /*
189  * In a container, the process running with the 'stats-period' option cannot be
190  * terminated. Set this flag to exit the stats period loop on SIGINT/SIGTERM.
191  */
192 uint8_t f_quit;
193
194 /*
195  * Configuration of packet segments used by the "txonly" processing engine.
196  */
197 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
198 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
199         TXONLY_DEF_PACKET_LEN,
200 };
201 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
202
203 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
204 /**< Split policy for packets to TX. */
205
206 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
207 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
208
209 /* whether the current configuration is in DCB mode; 0 means not in DCB mode */
210 uint8_t dcb_config = 0;
211
212 /* Whether DCB is in testing status */
213 uint8_t dcb_test = 0;
214
215 /*
216  * Configurable number of RX/TX queues.
217  */
218 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
219 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
220
221 /*
222  * Configurable number of RX/TX ring descriptors.
223  */
224 #define RTE_TEST_RX_DESC_DEFAULT 128
225 #define RTE_TEST_TX_DESC_DEFAULT 512
226 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
227 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
228
229 #define RTE_PMD_PARAM_UNSET -1
230 /*
231  * Configurable values of RX and TX ring threshold registers.
232  */
233
234 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
235 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
236 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
237
238 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
239 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
240 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
241
242 /*
243  * Configurable value of RX free threshold.
244  */
245 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
246
247 /*
248  * Configurable value of RX drop enable.
249  */
250 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
251
252 /*
253  * Configurable value of TX free threshold.
254  */
255 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
256
257 /*
258  * Configurable value of TX RS bit threshold.
259  */
260 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
261
262 /*
263  * Configurable value of TX queue flags.
264  */
265 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
266
267 /*
268  * Receive Side Scaling (RSS) configuration.
269  */
270 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
271
272 /*
273  * Port topology configuration
274  */
275 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
276
277 /*
278  * Avoid flushing all the RX streams before starting forwarding.
279  */
280 uint8_t no_flush_rx = 0; /* flush by default */
281
282 /*
283  * Flow API isolated mode.
284  */
285 uint8_t flow_isolate_all;
286
287 /*
288  * Avoid checking the link status when starting/stopping a port.
289  */
290 uint8_t no_link_check = 0; /* check by default */
291
292 /*
293  * Enable link status change notification
294  */
295 uint8_t lsc_interrupt = 1; /* enabled by default */
296
297 /*
298  * Enable device removal notification.
299  */
300 uint8_t rmv_interrupt = 1; /* enabled by default */
301
302 /*
303  * Display or mask ether events
304  * Default to all events except VF_MBOX
305  */
306 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
307                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
308                             (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
309                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
310                             (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
311                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
312
313 /*
314  * NIC bypass mode configuration options.
315  */
316
317 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
318 /* The NIC bypass watchdog timeout. */
319 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
320 #endif
321
322
323 #ifdef RTE_LIBRTE_LATENCY_STATS
324
325 /*
326  * Set when latency stats are enabled on the command line
327  */
328 uint8_t latencystats_enabled;
329
330 /*
331  * Lcore ID that serves latency statistics.
332  */
333 lcoreid_t latencystats_lcore_id = -1;
334
335 #endif
336
337 /*
338  * Ethernet device configuration.
339  */
340 struct rte_eth_rxmode rx_mode = {
341         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
342         .split_hdr_size = 0,
343         .header_split   = 0, /**< Header Split disabled. */
344         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
345         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
346         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
347         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
348         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
349         .hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
350         .hw_timestamp   = 0, /**< HW timestamp disabled. */
351 };
352
353 struct rte_fdir_conf fdir_conf = {
354         .mode = RTE_FDIR_MODE_NONE,
355         .pballoc = RTE_FDIR_PBALLOC_64K,
356         .status = RTE_FDIR_REPORT_STATUS,
357         .mask = {
358                 .vlan_tci_mask = 0x0,
359                 .ipv4_mask     = {
360                         .src_ip = 0xFFFFFFFF,
361                         .dst_ip = 0xFFFFFFFF,
362                 },
363                 .ipv6_mask     = {
364                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
365                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
366                 },
367                 .src_port_mask = 0xFFFF,
368                 .dst_port_mask = 0xFFFF,
369                 .mac_addr_byte_mask = 0xFF,
370                 .tunnel_type_mask = 1,
371                 .tunnel_id_mask = 0xFFFFFFFF,
372         },
373         .drop_queue = 127,
374 };
375
376 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
377
378 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
379 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
380
381 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
382 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
383
384 uint16_t nb_tx_queue_stats_mappings = 0;
385 uint16_t nb_rx_queue_stats_mappings = 0;
386
387 unsigned int num_sockets = 0;
388 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
389
390 #ifdef RTE_LIBRTE_BITRATE
391 /* Bitrate statistics */
392 struct rte_stats_bitrates *bitrate_data;
393 lcoreid_t bitrate_lcore_id;
394 uint8_t bitrate_enabled;
395 #endif
396
397 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
398 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
399
400 /* Forward function declarations */
401 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
402 static void check_all_ports_link_status(uint32_t port_mask);
403 static int eth_event_callback(portid_t port_id,
404                               enum rte_eth_event_type type,
405                               void *param, void *ret_param);
406
407 /*
408  * Check if all the ports are started.
409  * If yes, return positive value. If not, return zero.
410  */
411 static int all_ports_started(void);
412
413 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
414 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
415
416 /*
417  * Helper function to check whether a socket has not been discovered yet.
418  * Return a positive value if the socket is new, zero if it is already known.
419  */
420 int
421 new_socket_id(unsigned int socket_id)
422 {
423         unsigned int i;
424
425         for (i = 0; i < num_sockets; i++) {
426                 if (socket_ids[i] == socket_id)
427                         return 0;
428         }
429         return 1;
430 }
431
432 /*
433  * Setup default configuration.
434  */
435 static void
436 set_default_fwd_lcores_config(void)
437 {
438         unsigned int i;
439         unsigned int nb_lc;
440         unsigned int sock_num;
441
442         nb_lc = 0;
443         for (i = 0; i < RTE_MAX_LCORE; i++) {
444                 sock_num = rte_lcore_to_socket_id(i);
445                 if (new_socket_id(sock_num)) {
446                         if (num_sockets >= RTE_MAX_NUMA_NODES) {
447                                 rte_exit(EXIT_FAILURE,
448                                          "Total sockets greater than %u\n",
449                                          RTE_MAX_NUMA_NODES);
450                         }
451                         socket_ids[num_sockets++] = sock_num;
452                 }
453                 if (!rte_lcore_is_enabled(i))
454                         continue;
455                 if (i == rte_get_master_lcore())
456                         continue;
457                 fwd_lcores_cpuids[nb_lc++] = i;
458         }
459         nb_lcores = (lcoreid_t) nb_lc;
460         nb_cfg_lcores = nb_lcores;
461         nb_fwd_lcores = 1;
462 }
463
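/*
 * Give each possible peer port a default locally administered MAC address;
 * the last address byte encodes the peer port index.
 */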
464 static void
465 set_def_peer_eth_addrs(void)
466 {
467         portid_t i;
468
469         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
470                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
471                 peer_eth_addrs[i].addr_bytes[5] = i;
472         }
473 }
474
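/* By default, forward on every probed Ethernet port. */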
475 static void
476 set_default_fwd_ports_config(void)
477 {
478         portid_t pt_id;
479         int i = 0;
480
481         RTE_ETH_FOREACH_DEV(pt_id)
482                 fwd_ports_ids[i++] = pt_id;
483
484         nb_cfg_ports = nb_ports;
485         nb_fwd_ports = nb_ports;
486 }
487
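/* Build the default forwarding configuration: lcores, peer addresses, ports. */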
488 void
489 set_def_fwd_config(void)
490 {
491         set_default_fwd_lcores_config();
492         set_def_peer_eth_addrs();
493         set_default_fwd_ports_config();
494 }
495
496 /*
497  * Creation of an mbuf pool for a given socket, done once at init time.
498  */
499 static void
500 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
501                  unsigned int socket_id)
502 {
503         char pool_name[RTE_MEMPOOL_NAMESIZE];
504         struct rte_mempool *rte_mp = NULL;
505         uint32_t mb_size;
506
507         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
508         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
509
510         RTE_LOG(INFO, USER1,
511                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
512                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
513
514         if (mp_anon != 0) {
515                 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
516                         mb_size, (unsigned) mb_mempool_cache,
517                         sizeof(struct rte_pktmbuf_pool_private),
518                         socket_id, 0);
519                 if (rte_mp == NULL)
520                         goto err;
521
522                 if (rte_mempool_populate_anon(rte_mp) == 0) {
523                         rte_mempool_free(rte_mp);
524                         rte_mp = NULL;
525                         goto err;
526                 }
527                 rte_pktmbuf_pool_init(rte_mp, NULL);
528                 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
529         } else {
530                 /* wrapper to rte_mempool_create() */
531                 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
532                         mb_mempool_cache, 0, mbuf_seg_size, socket_id);
533         }
534
535 err:
536         if (rte_mp == NULL) {
537                 rte_exit(EXIT_FAILURE,
538                         "Creation of mbuf pool for socket %u failed: %s\n",
539                         socket_id, rte_strerror(rte_errno));
540         } else if (verbose_level > 0) {
541                 rte_mempool_dump(stdout, rte_mp);
542         }
543 }
544
545 /*
546  * Check whether the given socket ID is valid in NUMA mode;
547  * return 0 if it is valid, -1 otherwise.
548  */
549 static int
550 check_socket_id(const unsigned int socket_id)
551 {
552         static int warning_once = 0;
553
554         if (new_socket_id(socket_id)) {
555                 if (!warning_once && numa_support)
556                         printf("Warning: NUMA should be configured manually by"
557                                " using --port-numa-config and"
558                                " --ring-numa-config parameters along with"
559                                " --numa.\n");
560                 warning_once = 1;
561                 return -1;
562         }
563         return 0;
564 }
565
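/*
 * One-time initialization of the forwarding configuration: logical core
 * contexts, per-socket mbuf pools, port configuration, GSO contexts,
 * forwarding streams and per-lcore GRO contexts.
 */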
566 static void
567 init_config(void)
568 {
569         portid_t pid;
570         struct rte_port *port;
571         struct rte_mempool *mbp;
572         unsigned int nb_mbuf_per_pool;
573         lcoreid_t  lc_id;
574         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
575         struct rte_gro_param gro_param;
576         uint32_t gso_types;
577
578         memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
579
580         if (numa_support) {
581                 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
582                 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
583                 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
584         }
585
586         /* Configuration of logical cores. */
587         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
588                                 sizeof(struct fwd_lcore *) * nb_lcores,
589                                 RTE_CACHE_LINE_SIZE);
590         if (fwd_lcores == NULL) {
591                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
592                                                         "failed\n", nb_lcores);
593         }
594         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
595                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
596                                                sizeof(struct fwd_lcore),
597                                                RTE_CACHE_LINE_SIZE);
598                 if (fwd_lcores[lc_id] == NULL) {
599                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
600                                                                 "failed\n");
601                 }
602                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
603         }
604
605         RTE_ETH_FOREACH_DEV(pid) {
606                 port = &ports[pid];
607                 rte_eth_dev_info_get(pid, &port->dev_info);
608
609                 if (numa_support) {
610                         if (port_numa[pid] != NUMA_NO_CONFIG)
611                                 port_per_socket[port_numa[pid]]++;
612                         else {
613                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
614
615                                 /* if socket_id is invalid, set to 0 */
616                                 if (check_socket_id(socket_id) < 0)
617                                         socket_id = 0;
618                                 port_per_socket[socket_id]++;
619                         }
620                 }
621
622                 /* set flag to initialize port/queue */
623                 port->need_reconfig = 1;
624                 port->need_reconfig_queues = 1;
625         }
626
627         /*
628          * Create pools of mbuf.
629          * If NUMA support is disabled, create a single pool of mbuf in
630          * socket 0 memory by default.
631  * Otherwise, create a pool of mbuf in the memory of each detected socket.
632          *
633          * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
634          * nb_txd can be configured at run time.
635          */
636         if (param_total_num_mbufs)
637                 nb_mbuf_per_pool = param_total_num_mbufs;
638         else {
639                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
640                         (nb_lcores * mb_mempool_cache) +
641                         RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
642                 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
643         }
644
645         if (numa_support) {
646                 uint8_t i;
647
648                 for (i = 0; i < num_sockets; i++)
649                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
650                                          socket_ids[i]);
651         } else {
652                 if (socket_num == UMA_NO_CONFIG)
653                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
654                 else
655                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
656                                                  socket_num);
657         }
658
659         init_port_config();
660
661         gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
662                 DEV_TX_OFFLOAD_GRE_TNL_TSO;
663         /*
664          * Record which mbuf pool each logical core should use, if needed.
665          */
666         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
667                 mbp = mbuf_pool_find(
668                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
669
670                 if (mbp == NULL)
671                         mbp = mbuf_pool_find(0);
672                 fwd_lcores[lc_id]->mbp = mbp;
673                 /* initialize GSO context */
674                 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
675                 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
676                 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
677                 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
678                         ETHER_CRC_LEN;
679                 fwd_lcores[lc_id]->gso_ctx.flag = 0;
680         }
681
682         /* Configuration of packet forwarding streams. */
683         if (init_fwd_streams() < 0)
684                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
685
686         fwd_config_setup();
687
688         /* create a gro context for each lcore */
689         gro_param.gro_types = RTE_GRO_TCP_IPV4;
690         gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
691         gro_param.max_item_per_flow = MAX_PKT_BURST;
692         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
693                 gro_param.socket_id = rte_lcore_to_socket_id(
694                                 fwd_lcores_cpuids[lc_id]);
695                 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
696                 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
697                         rte_exit(EXIT_FAILURE,
698                                         "rte_gro_ctx_create() failed\n");
699                 }
700         }
701 }
702
703
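/*
 * Refresh the device information of a (re)attached port and mark it for
 * reconfiguration before use.
 */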
704 void
705 reconfig(portid_t new_port_id, unsigned socket_id)
706 {
707         struct rte_port *port;
708
709         /* Reconfiguration of Ethernet ports. */
710         port = &ports[new_port_id];
711         rte_eth_dev_info_get(new_port_id, &port->dev_info);
712
713         /* set flag to initialize port/queue */
714         port->need_reconfig = 1;
715         port->need_reconfig_queues = 1;
716         port->socket_id = socket_id;
717
718         init_port_config();
719 }
720
721
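/*
 * Allocate one forwarding stream per (port, queue) pair after validating the
 * requested queue counts and assigning a NUMA socket to each port.
 */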
722 int
723 init_fwd_streams(void)
724 {
725         portid_t pid;
726         struct rte_port *port;
727         streamid_t sm_id, nb_fwd_streams_new;
728         queueid_t q;
729
730         /* set socket id according to numa or not */
731         RTE_ETH_FOREACH_DEV(pid) {
732                 port = &ports[pid];
733                 if (nb_rxq > port->dev_info.max_rx_queues) {
734                         printf("Fail: nb_rxq(%d) is greater than "
735                                 "max_rx_queues(%d)\n", nb_rxq,
736                                 port->dev_info.max_rx_queues);
737                         return -1;
738                 }
739                 if (nb_txq > port->dev_info.max_tx_queues) {
740                         printf("Fail: nb_txq(%d) is greater than "
741                                 "max_tx_queues(%d)\n", nb_txq,
742                                 port->dev_info.max_tx_queues);
743                         return -1;
744                 }
745                 if (numa_support) {
746                         if (port_numa[pid] != NUMA_NO_CONFIG)
747                                 port->socket_id = port_numa[pid];
748                         else {
749                                 port->socket_id = rte_eth_dev_socket_id(pid);
750
751                                 /* if socket_id is invalid, set to 0 */
752                                 if (check_socket_id(port->socket_id) < 0)
753                                         port->socket_id = 0;
754                         }
755                 }
756                 else {
757                         if (socket_num == UMA_NO_CONFIG)
758                                 port->socket_id = 0;
759                         else
760                                 port->socket_id = socket_num;
761                 }
762         }
763
764         q = RTE_MAX(nb_rxq, nb_txq);
765         if (q == 0) {
766                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
767                 return -1;
768         }
769         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
770         if (nb_fwd_streams_new == nb_fwd_streams)
771                 return 0;
772         /* clear the old */
773         if (fwd_streams != NULL) {
774                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
775                         if (fwd_streams[sm_id] == NULL)
776                                 continue;
777                         rte_free(fwd_streams[sm_id]);
778                         fwd_streams[sm_id] = NULL;
779                 }
780                 rte_free(fwd_streams);
781                 fwd_streams = NULL;
782         }
783
784         /* init new */
785         nb_fwd_streams = nb_fwd_streams_new;
786         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
787                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
788         if (fwd_streams == NULL)
789                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
790                                                 "failed\n", nb_fwd_streams);
791
792         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
793                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
794                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
795                 if (fwd_streams[sm_id] == NULL)
796                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
797                                                                 " failed\n");
798         }
799
800         return 0;
801 }
802
803 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
804 static void
805 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
806 {
807         unsigned int total_burst;
808         unsigned int nb_burst;
809         unsigned int burst_stats[3];
810         uint16_t pktnb_stats[3];
811         uint16_t nb_pkt;
812         int burst_percent[3];
813
814         /*
815          * First compute the total number of packet bursts and the
816          * two highest numbers of bursts of the same number of packets.
817          */
818         total_burst = 0;
819         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
820         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
821         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
822                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
823                 if (nb_burst == 0)
824                         continue;
825                 total_burst += nb_burst;
826                 if (nb_burst > burst_stats[0]) {
827                         burst_stats[1] = burst_stats[0];
828                         pktnb_stats[1] = pktnb_stats[0];
829                         burst_stats[0] = nb_burst;
830                         pktnb_stats[0] = nb_pkt;
831                 }
832         }
833         if (total_burst == 0)
834                 return;
835         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
836         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
837                burst_percent[0], (int) pktnb_stats[0]);
838         if (burst_stats[0] == total_burst) {
839                 printf("]\n");
840                 return;
841         }
842         if (burst_stats[0] + burst_stats[1] == total_burst) {
843                 printf(" + %d%% of %d pkts]\n",
844                        100 - burst_percent[0], pktnb_stats[1]);
845                 return;
846         }
847         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
848         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
849         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
850                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
851                 return;
852         }
853         printf(" + %d%% of %d pkts + %d%% of others]\n",
854                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
855 }
856 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
857
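/* Display the forwarding statistics accumulated on one port. */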
858 static void
859 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
860 {
861         struct rte_port *port;
862         uint8_t i;
863
864         static const char *fwd_stats_border = "----------------------";
865
866         port = &ports[port_id];
867         printf("\n  %s Forward statistics for port %-2d %s\n",
868                fwd_stats_border, port_id, fwd_stats_border);
869
870         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
871                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
872                        "%-"PRIu64"\n",
873                        stats->ipackets, stats->imissed,
874                        (uint64_t) (stats->ipackets + stats->imissed));
875
876                 if (cur_fwd_eng == &csum_fwd_engine)
877                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
878                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
879                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
880                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
881                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
882                 }
883
884                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
885                        "%-"PRIu64"\n",
886                        stats->opackets, port->tx_dropped,
887                        (uint64_t) (stats->opackets + port->tx_dropped));
888         }
889         else {
890                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
891                        "%14"PRIu64"\n",
892                        stats->ipackets, stats->imissed,
893                        (uint64_t) (stats->ipackets + stats->imissed));
894
895                 if (cur_fwd_eng == &csum_fwd_engine)
896                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
897                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
898                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
899                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
900                         printf("  RX-nombufs:             %14"PRIu64"\n",
901                                stats->rx_nombuf);
902                 }
903
904                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
905                        "%14"PRIu64"\n",
906                        stats->opackets, port->tx_dropped,
907                        (uint64_t) (stats->opackets + port->tx_dropped));
908         }
909
910 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
911         if (port->rx_stream)
912                 pkt_burst_stats_display("RX",
913                         &port->rx_stream->rx_burst_stats);
914         if (port->tx_stream)
915                 pkt_burst_stats_display("TX",
916                         &port->tx_stream->tx_burst_stats);
917 #endif
918
919         if (port->rx_queue_stats_mapping_enabled) {
920                 printf("\n");
921                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
922                         printf("  Stats reg %2d RX-packets:%14"PRIu64
923                                "     RX-errors:%14"PRIu64
924                                "    RX-bytes:%14"PRIu64"\n",
925                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
926                 }
927                 printf("\n");
928         }
929         if (port->tx_queue_stats_mapping_enabled) {
930                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
931                         printf("  Stats reg %2d TX-packets:%14"PRIu64
932                                "                                 TX-bytes:%14"PRIu64"\n",
933                                i, stats->q_opackets[i], stats->q_obytes[i]);
934                 }
935         }
936
937         printf("  %s--------------------------------%s\n",
938                fwd_stats_border, fwd_stats_border);
939 }
940
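/* Display per-stream forwarding statistics; streams without traffic are skipped. */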
941 static void
942 fwd_stream_stats_display(streamid_t stream_id)
943 {
944         struct fwd_stream *fs;
945         static const char *fwd_top_stats_border = "-------";
946
947         fs = fwd_streams[stream_id];
948         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
949             (fs->fwd_dropped == 0))
950                 return;
951         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
952                "TX Port=%2d/Queue=%2d %s\n",
953                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
954                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
955         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
956                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
957
958         /* if checksum mode */
959         if (cur_fwd_eng == &csum_fwd_engine) {
960                printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
961                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
962         }
963
964 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
965         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
966         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
967 #endif
968 }
969
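/*
 * Drain and free any packets still pending in the RX queues of the
 * forwarding ports, so that stale traffic does not pollute a new run.
 */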
970 static void
971 flush_fwd_rx_queues(void)
972 {
973         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
974         portid_t  rxp;
975         portid_t port_id;
976         queueid_t rxq;
977         uint16_t  nb_rx;
978         uint16_t  i;
979         uint8_t   j;
980         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
981         uint64_t timer_period;
982
983         /* convert to number of cycles */
984         timer_period = rte_get_timer_hz(); /* 1 second timeout */
985
986         for (j = 0; j < 2; j++) {
987                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
988                         for (rxq = 0; rxq < nb_rxq; rxq++) {
989                                 port_id = fwd_ports_ids[rxp];
990                                 /*
991                                  * testpmd can get stuck in the do-while loop
992                                  * below if rte_eth_rx_burst() keeps returning
993                                  * packets, so a timer is used to exit the loop
994                                  * after a 1-second expiry.
995                                  */
996                                 prev_tsc = rte_rdtsc();
997                                 do {
998                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
999                                                 pkts_burst, MAX_PKT_BURST);
1000                                         for (i = 0; i < nb_rx; i++)
1001                                                 rte_pktmbuf_free(pkts_burst[i]);
1002
1003                                         cur_tsc = rte_rdtsc();
1004                                         diff_tsc = cur_tsc - prev_tsc;
1005                                         timer_tsc += diff_tsc;
1006                                 } while ((nb_rx > 0) &&
1007                                         (timer_tsc < timer_period));
1008                                 timer_tsc = 0;
1009                         }
1010                 }
1011                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1012         }
1013 }
1014
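/*
 * Forwarding loop of one logical core: apply the packet forwarding callback
 * to each of its streams until the core is told to stop, updating the
 * bitrate and latency statistics when they are enabled.
 */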
1015 static void
1016 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1017 {
1018         struct fwd_stream **fsm;
1019         streamid_t nb_fs;
1020         streamid_t sm_id;
1021 #ifdef RTE_LIBRTE_BITRATE
1022         uint64_t tics_per_1sec;
1023         uint64_t tics_datum;
1024         uint64_t tics_current;
1025         uint8_t idx_port, cnt_ports;
1026
1027         cnt_ports = rte_eth_dev_count();
1028         tics_datum = rte_rdtsc();
1029         tics_per_1sec = rte_get_timer_hz();
1030 #endif
1031         fsm = &fwd_streams[fc->stream_idx];
1032         nb_fs = fc->stream_nb;
1033         do {
1034                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1035                         (*pkt_fwd)(fsm[sm_id]);
1036 #ifdef RTE_LIBRTE_BITRATE
1037                 if (bitrate_enabled != 0 &&
1038                                 bitrate_lcore_id == rte_lcore_id()) {
1039                         tics_current = rte_rdtsc();
1040                         if (tics_current - tics_datum >= tics_per_1sec) {
1041                                 /* Periodic bitrate calculation */
1042                                 for (idx_port = 0;
1043                                                 idx_port < cnt_ports;
1044                                                 idx_port++)
1045                                         rte_stats_bitrate_calc(bitrate_data,
1046                                                 idx_port);
1047                                 tics_datum = tics_current;
1048                         }
1049                 }
1050 #endif
1051 #ifdef RTE_LIBRTE_LATENCY_STATS
1052                 if (latencystats_enabled != 0 &&
1053                                 latencystats_lcore_id == rte_lcore_id())
1054                         rte_latencystats_update();
1055 #endif
1056
1057         } while (! fc->stopped);
1058 }
1059
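/* lcore entry point: run the current forwarding engine on this core's streams. */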
1060 static int
1061 start_pkt_forward_on_core(void *fwd_arg)
1062 {
1063         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1064                              cur_fwd_config.fwd_eng->packet_fwd);
1065         return 0;
1066 }
1067
1068 /*
1069  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1070  * Used to start communication flows in network loopback test configurations.
1071  */
1072 static int
1073 run_one_txonly_burst_on_core(void *fwd_arg)
1074 {
1075         struct fwd_lcore *fwd_lc;
1076         struct fwd_lcore tmp_lcore;
1077
1078         fwd_lc = (struct fwd_lcore *) fwd_arg;
1079         tmp_lcore = *fwd_lc;
1080         tmp_lcore.stopped = 1;
1081         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1082         return 0;
1083 }
1084
1085 /*
1086  * Launch packet forwarding:
1087  *     - Setup per-port forwarding context.
1088  *     - launch logical cores with their forwarding configuration.
1089  */
1090 static void
1091 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1092 {
1093         port_fwd_begin_t port_fwd_begin;
1094         unsigned int i;
1095         unsigned int lc_id;
1096         int diag;
1097
1098         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1099         if (port_fwd_begin != NULL) {
1100                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1101                         (*port_fwd_begin)(fwd_ports_ids[i]);
1102         }
1103         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1104                 lc_id = fwd_lcores_cpuids[i];
1105                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1106                         fwd_lcores[i]->stopped = 0;
1107                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1108                                                      fwd_lcores[i], lc_id);
1109                         if (diag != 0)
1110                                 printf("launch lcore %u failed - diag=%d\n",
1111                                        lc_id, diag);
1112                 }
1113         }
1114 }
1115
1116 /*
1117  * Launch packet forwarding configuration.
1118  */
1119 void
1120 start_packet_forwarding(int with_tx_first)
1121 {
1122         port_fwd_begin_t port_fwd_begin;
1123         port_fwd_end_t  port_fwd_end;
1124         struct rte_port *port;
1125         unsigned int i;
1126         portid_t   pt_id;
1127         streamid_t sm_id;
1128
1129         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1130                 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1131
1132         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1133                 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1134
1135         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1136                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1137                 (!nb_rxq || !nb_txq))
1138                 rte_exit(EXIT_FAILURE,
1139                         "Either rxq or txq is 0, cannot use %s fwd mode\n",
1140                         cur_fwd_eng->fwd_mode_name);
1141
1142         if (all_ports_started() == 0) {
1143                 printf("Not all ports were started\n");
1144                 return;
1145         }
1146         if (test_done == 0) {
1147                 printf("Packet forwarding already started\n");
1148                 return;
1149         }
1150
1151         if (init_fwd_streams() < 0) {
1152                 printf("Fail from init_fwd_streams()\n");
1153                 return;
1154         }
1155
1156         if(dcb_test) {
1157                 for (i = 0; i < nb_fwd_ports; i++) {
1158                         pt_id = fwd_ports_ids[i];
1159                         port = &ports[pt_id];
1160                         if (!port->dcb_flag) {
1161                                 printf("In DCB mode, all forwarding ports must "
1162                                        "be configured in this mode.\n");
1163                                 return;
1164                         }
1165                 }
1166                 if (nb_fwd_lcores == 1) {
1167                         printf("In DCB mode, the number of forwarding cores "
1168                                "should be larger than 1.\n");
1169                         return;
1170                 }
1171         }
1172         test_done = 0;
1173
1174         if(!no_flush_rx)
1175                 flush_fwd_rx_queues();
1176
1177         fwd_config_setup();
1178         pkt_fwd_config_display(&cur_fwd_config);
1179         rxtx_config_display();
1180
1181         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1182                 pt_id = fwd_ports_ids[i];
1183                 port = &ports[pt_id];
1184                 rte_eth_stats_get(pt_id, &port->stats);
1185                 port->tx_dropped = 0;
1186
1187                 map_port_queue_stats_mapping_registers(pt_id, port);
1188         }
1189         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1190                 fwd_streams[sm_id]->rx_packets = 0;
1191                 fwd_streams[sm_id]->tx_packets = 0;
1192                 fwd_streams[sm_id]->fwd_dropped = 0;
1193                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1194                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1195
1196 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1197                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1198                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1199                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1200                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1201 #endif
1202 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1203                 fwd_streams[sm_id]->core_cycles = 0;
1204 #endif
1205         }
1206         if (with_tx_first) {
1207                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1208                 if (port_fwd_begin != NULL) {
1209                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1210                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1211                 }
1212                 while (with_tx_first--) {
1213                         launch_packet_forwarding(
1214                                         run_one_txonly_burst_on_core);
1215                         rte_eal_mp_wait_lcore();
1216                 }
1217                 port_fwd_end = tx_only_engine.port_fwd_end;
1218                 if (port_fwd_end != NULL) {
1219                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1220                                 (*port_fwd_end)(fwd_ports_ids[i]);
1221                 }
1222         }
1223         launch_packet_forwarding(start_pkt_forward_on_core);
1224 }
1225
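/*
 * Stop packet forwarding: tell the forwarding lcores to stop, wait for them,
 * then aggregate and display per-stream, per-port and accumulated statistics.
 */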
1226 void
1227 stop_packet_forwarding(void)
1228 {
1229         struct rte_eth_stats stats;
1230         struct rte_port *port;
1231         port_fwd_end_t  port_fwd_end;
1232         int i;
1233         portid_t   pt_id;
1234         streamid_t sm_id;
1235         lcoreid_t  lc_id;
1236         uint64_t total_recv;
1237         uint64_t total_xmit;
1238         uint64_t total_rx_dropped;
1239         uint64_t total_tx_dropped;
1240         uint64_t total_rx_nombuf;
1241         uint64_t tx_dropped;
1242         uint64_t rx_bad_ip_csum;
1243         uint64_t rx_bad_l4_csum;
1244 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1245         uint64_t fwd_cycles;
1246 #endif
1247
1248         static const char *acc_stats_border = "+++++++++++++++";
1249
1250         if (test_done) {
1251                 printf("Packet forwarding not started\n");
1252                 return;
1253         }
1254         printf("Telling cores to stop...");
1255         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1256                 fwd_lcores[lc_id]->stopped = 1;
1257         printf("\nWaiting for lcores to finish...\n");
1258         rte_eal_mp_wait_lcore();
1259         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1260         if (port_fwd_end != NULL) {
1261                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1262                         pt_id = fwd_ports_ids[i];
1263                         (*port_fwd_end)(pt_id);
1264                 }
1265         }
1266 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1267         fwd_cycles = 0;
1268 #endif
1269         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1270                 if (cur_fwd_config.nb_fwd_streams >
1271                     cur_fwd_config.nb_fwd_ports) {
1272                         fwd_stream_stats_display(sm_id);
1273                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1274                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1275                 } else {
1276                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1277                                 fwd_streams[sm_id];
1278                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1279                                 fwd_streams[sm_id];
1280                 }
1281                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1282                 tx_dropped = (uint64_t) (tx_dropped +
1283                                          fwd_streams[sm_id]->fwd_dropped);
1284                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1285
1286                 rx_bad_ip_csum =
1287                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1288                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1289                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1290                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1291                                                         rx_bad_ip_csum;
1292
1293                 rx_bad_l4_csum =
1294                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1295                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1296                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1297                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1298                                                         rx_bad_l4_csum;
1299
1300 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1301                 fwd_cycles = (uint64_t) (fwd_cycles +
1302                                          fwd_streams[sm_id]->core_cycles);
1303 #endif
1304         }
1305         total_recv = 0;
1306         total_xmit = 0;
1307         total_rx_dropped = 0;
1308         total_tx_dropped = 0;
1309         total_rx_nombuf  = 0;
1310         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1311                 pt_id = fwd_ports_ids[i];
1312
1313                 port = &ports[pt_id];
1314                 rte_eth_stats_get(pt_id, &stats);
1315                 stats.ipackets -= port->stats.ipackets;
1316                 port->stats.ipackets = 0;
1317                 stats.opackets -= port->stats.opackets;
1318                 port->stats.opackets = 0;
1319                 stats.ibytes   -= port->stats.ibytes;
1320                 port->stats.ibytes = 0;
1321                 stats.obytes   -= port->stats.obytes;
1322                 port->stats.obytes = 0;
1323                 stats.imissed  -= port->stats.imissed;
1324                 port->stats.imissed = 0;
1325                 stats.oerrors  -= port->stats.oerrors;
1326                 port->stats.oerrors = 0;
1327                 stats.rx_nombuf -= port->stats.rx_nombuf;
1328                 port->stats.rx_nombuf = 0;
1329
1330                 total_recv += stats.ipackets;
1331                 total_xmit += stats.opackets;
1332                 total_rx_dropped += stats.imissed;
1333                 total_tx_dropped += port->tx_dropped;
1334                 total_rx_nombuf  += stats.rx_nombuf;
1335
1336                 fwd_port_stats_display(pt_id, &stats);
1337         }
1338
1339         printf("\n  %s Accumulated forward statistics for all ports"
1340                "%s\n",
1341                acc_stats_border, acc_stats_border);
1342         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1343                "%-"PRIu64"\n"
1344                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1345                "%-"PRIu64"\n",
1346                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1347                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1348         if (total_rx_nombuf > 0)
1349                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1350         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1351                "%s\n",
1352                acc_stats_border, acc_stats_border);
1353 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1354         if (total_recv > 0)
1355                 printf("\n  CPU cycles/packet=%u (total cycles="
1356                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1357                        (unsigned int)(fwd_cycles / total_recv),
1358                        fwd_cycles, total_recv);
1359 #endif
1360         printf("\nDone.\n");
1361         test_done = 1;
1362 }
1363
1364 void
1365 dev_set_link_up(portid_t pid)
1366 {
1367         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1368                 printf("\nSet link up failed.\n");
1369 }
1370
1371 void
1372 dev_set_link_down(portid_t pid)
1373 {
1374         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1375                 printf("\nSet link down failed.\n");
1376 }
1377
1378 static int
1379 all_ports_started(void)
1380 {
1381         portid_t pi;
1382         struct rte_port *port;
1383
1384         RTE_ETH_FOREACH_DEV(pi) {
1385                 port = &ports[pi];
1386                 /* Check if there is a port which is not started */
1387                 if ((port->port_status != RTE_PORT_STARTED) &&
1388                         (port->slave_flag == 0))
1389                         return 0;
1390         }
1391
1392         /* All ports are started */
1393         return 1;
1394 }
1395
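/* Return 1 when every non-slave port is stopped, 0 otherwise. */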
1396 int
1397 all_ports_stopped(void)
1398 {
1399         portid_t pi;
1400         struct rte_port *port;
1401
1402         RTE_ETH_FOREACH_DEV(pi) {
1403                 port = &ports[pi];
1404                 if ((port->port_status != RTE_PORT_STOPPED) &&
1405                         (port->slave_flag == 0))
1406                         return 0;
1407         }
1408
1409         return 1;
1410 }
1411
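/* Return 1 if the given port exists and is started, 0 otherwise. */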
1412 int
1413 port_is_started(portid_t port_id)
1414 {
1415         if (port_id_is_invalid(port_id, ENABLED_WARN))
1416                 return 0;
1417
1418         if (ports[port_id].port_status != RTE_PORT_STARTED)
1419                 return 0;
1420
1421         return 1;
1422 }
1423
1424 static int
1425 port_is_closed(portid_t port_id)
1426 {
1427         if (port_id_is_invalid(port_id, ENABLED_WARN))
1428                 return 0;
1429
1430         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1431                 return 0;
1432
1433         return 1;
1434 }
1435
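/*
 * Configure the given port, or all ports when pid is RTE_PORT_ALL, and
 * (re)create its RX/TX queues when a reconfiguration is pending, before
 * starting it.
 */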
1436 int
1437 start_port(portid_t pid)
1438 {
1439         int diag, need_check_link_status = -1;
1440         portid_t pi;
1441         queueid_t qi;
1442         struct rte_port *port;
1443         struct ether_addr mac_addr;
1444         enum rte_eth_event_type event_type;
1445
1446         if (port_id_is_invalid(pid, ENABLED_WARN))
1447                 return 0;
1448
1449         if (dcb_config)
1450                 dcb_test = 1;
1451         RTE_ETH_FOREACH_DEV(pi) {
1452                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1453                         continue;
1454
1455                 need_check_link_status = 0;
1456                 port = &ports[pi];
1457                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1458                                                  RTE_PORT_HANDLING) == 0) {
1459                         printf("Port %d is not stopped\n", pi);
1460                         continue;
1461                 }
1462
1463                 if (port->need_reconfig > 0) {
1464                         port->need_reconfig = 0;
1465
1466                         if (flow_isolate_all) {
1467                                 int ret = port_flow_isolate(pi, 1);
1468                                 if (ret) {
1469                                         printf("Failed to apply isolated"
1470                                                " mode on port %d\n", pi);
1471                                         return -1;
1472                                 }
1473                         }
1474
1475                         printf("Configuring Port %d (socket %u)\n", pi,
1476                                         port->socket_id);
1477                         /* configure port */
1478                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1479                                                 &(port->dev_conf));
1480                         if (diag != 0) {
1481                                 if (rte_atomic16_cmpset(&(port->port_status),
1482                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1483                                 if (rte_atomic16_cmpset(&(port->port_status),
1484                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1485                                         printf("Port %d cannot be set back "
1486                                                         "to stopped\n", pi);
1487                                 printf("Failed to configure port %d\n", pi);
1486                                 /* try to reconfigure port next time */
1487                                 port->need_reconfig = 1;
1488                                 return -1;
1489                         }
1490                 }
1491                 if (port->need_reconfig_queues > 0) {
1492                         port->need_reconfig_queues = 0;
1493                         /* setup tx queues */
1494                         for (qi = 0; qi < nb_txq; qi++) {
1495                                 if ((numa_support) &&
1496                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1497                                         diag = rte_eth_tx_queue_setup(pi, qi,
1498                                                 nb_txd, txring_numa[pi],
1499                                                 &(port->tx_conf));
1500                                 else
1501                                         diag = rte_eth_tx_queue_setup(pi, qi,
1502                                                 nb_txd, port->socket_id,
1503                                                 &(port->tx_conf));
1504
1505                                 if (diag == 0)
1506                                         continue;
1507
1508                                 /* Fail to setup tx queue, return */
1509                                 if (rte_atomic16_cmpset(&(port->port_status),
1510                                                         RTE_PORT_HANDLING,
1511                                                         RTE_PORT_STOPPED) == 0)
1512                                         printf("Port %d cannot be set back "
1513                                                         "to stopped\n", pi);
1514                                 printf("Failed to configure port %d TX queues\n", pi);
1515                                 /* try to reconfigure queues next time */
1516                                 port->need_reconfig_queues = 1;
1517                                 return -1;
1518                         }
1519                         /* setup rx queues */
1520                         for (qi = 0; qi < nb_rxq; qi++) {
1521                                 if ((numa_support) &&
1522                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1523                                         struct rte_mempool *mp =
1524                                                 mbuf_pool_find(rxring_numa[pi]);
1525                                         if (mp == NULL) {
1526                                                 printf("Failed to setup RX queue:"
1527                                                         " no mempool allocated"
1528                                                         " on socket %d\n",
1529                                                         rxring_numa[pi]);
1530                                                 return -1;
1531                                         }
1532
1533                                         diag = rte_eth_rx_queue_setup(pi, qi,
1534                                              nb_rxd, rxring_numa[pi],
1535                                              &(port->rx_conf), mp);
1536                                 } else {
1537                                         struct rte_mempool *mp =
1538                                                 mbuf_pool_find(port->socket_id);
1539                                         if (mp == NULL) {
1540                                                 printf("Failed to setup RX queue:"
1541                                                         " no mempool allocated"
1542                                                         " on socket %d\n",
1543                                                         port->socket_id);
1544                                                 return -1;
1545                                         }
1546                                         diag = rte_eth_rx_queue_setup(pi, qi,
1547                                              nb_rxd, port->socket_id,
1548                                              &(port->rx_conf), mp);
1549                                 }
1550                                 if (diag == 0)
1551                                         continue;
1552
1553                                 /* Fail to setup rx queue, return */
1554                                 if (rte_atomic16_cmpset(&(port->port_status),
1555                                                         RTE_PORT_HANDLING,
1556                                                         RTE_PORT_STOPPED) == 0)
1557                                         printf("Port %d cannot be set back "
1558                                                         "to stopped\n", pi);
1559                                 printf("Failed to configure port %d RX queues\n", pi);
1560                                 /* try to reconfigure queues next time */
1561                                 port->need_reconfig_queues = 1;
1562                                 return -1;
1563                         }
1564                 }
1565
1566                 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1567                      event_type < RTE_ETH_EVENT_MAX;
1568                      event_type++) {
1569                         diag = rte_eth_dev_callback_register(pi,
1570                                                         event_type,
1571                                                         eth_event_callback,
1572                                                         NULL);
1573                         if (diag) {
1574                                 printf("Failed to setup event callback for event %d\n",
1575                                         event_type);
1576                                 return -1;
1577                         }
1578                 }
1579
1580                 /* start port */
1581                 if (rte_eth_dev_start(pi) < 0) {
1582                         printf("Failed to start port %d\n", pi);
1583
1584                         /* Failed to start the port; set it back to stopped */
1585                         if (rte_atomic16_cmpset(&(port->port_status),
1586                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1587                                 printf("Port %d cannot be set back to "
1588                                                         "stopped\n", pi);
1589                         continue;
1590                 }
1591
1592                 if (rte_atomic16_cmpset(&(port->port_status),
1593                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1594                         printf("Port %d cannot be set to started\n", pi);
1595
1596                 rte_eth_macaddr_get(pi, &mac_addr);
1597                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1598                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1599                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1600                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1601
1602                 /* at least one port started, need to check link status */
1603                 need_check_link_status = 1;
1604         }
1605
1606         if (need_check_link_status == 1 && !no_link_check)
1607                 check_all_ports_link_status(RTE_PORT_ALL);
1608         else if (need_check_link_status == 0)
1609                 printf("Please stop the ports first\n");
1610
1611         printf("Done\n");
1612         return 0;
1613 }
1614
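/*
 * Stop the port given by pid, or every port when pid == RTE_PORT_ALL,
 * skipping ports that are still part of the forwarding configuration or
 * that act as bonding slaves.
 */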
1615 void
1616 stop_port(portid_t pid)
1617 {
1618         portid_t pi;
1619         struct rte_port *port;
1620         int need_check_link_status = 0;
1621
1622         if (dcb_test) {
1623                 dcb_test = 0;
1624                 dcb_config = 0;
1625         }
1626
1627         if (port_id_is_invalid(pid, ENABLED_WARN))
1628                 return;
1629
1630         printf("Stopping ports...\n");
1631
1632         RTE_ETH_FOREACH_DEV(pi) {
1633                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1634                         continue;
1635
1636                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1637                         printf("Please remove port %d from forwarding configuration.\n", pi);
1638                         continue;
1639                 }
1640
1641                 if (port_is_bonding_slave(pi)) {
1642                         printf("Please remove port %d from bonded device.\n", pi);
1643                         continue;
1644                 }
1645
1646                 port = &ports[pi];
1647                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1648                                                 RTE_PORT_HANDLING) == 0)
1649                         continue;
1650
1651                 rte_eth_dev_stop(pi);
1652
1653                 if (rte_atomic16_cmpset(&(port->port_status),
1654                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1655                         printf("Port %d cannot be set to stopped\n", pi);
1656                 need_check_link_status = 1;
1657         }
1658         if (need_check_link_status && !no_link_check)
1659                 check_all_ports_link_status(RTE_PORT_ALL);
1660
1661         printf("Done\n");
1662 }
1663
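/*
 * Close the port given by pid, or every port when pid == RTE_PORT_ALL:
 * flush any flow rules still attached to the port, then release the device
 * with rte_eth_dev_close().
 */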
1664 void
1665 close_port(portid_t pid)
1666 {
1667         portid_t pi;
1668         struct rte_port *port;
1669
1670         if (port_id_is_invalid(pid, ENABLED_WARN))
1671                 return;
1672
1673         printf("Closing ports...\n");
1674
1675         RTE_ETH_FOREACH_DEV(pi) {
1676                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1677                         continue;
1678
1679                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1680                         printf("Please remove port %d from forwarding configuration.\n", pi);
1681                         continue;
1682                 }
1683
1684                 if (port_is_bonding_slave(pi)) {
1685                         printf("Please remove port %d from bonded device.\n", pi);
1686                         continue;
1687                 }
1688
1689                 port = &ports[pi];
1690                 if (rte_atomic16_cmpset(&(port->port_status),
1691                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1692                         printf("Port %d is already closed\n", pi);
1693                         continue;
1694                 }
1695
1696                 if (rte_atomic16_cmpset(&(port->port_status),
1697                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1698                         printf("Port %d is not stopped\n", pi);
1699                         continue;
1700                 }
1701
1702                 if (port->flow_list)
1703                         port_flow_flush(pi);
1704                 rte_eth_dev_close(pi);
1705
1706                 if (rte_atomic16_cmpset(&(port->port_status),
1707                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1708                         printf("Port %d cannot be set to closed\n", pi);
1709         }
1710
1711         printf("Done\n");
1712 }
1713
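/*
 * Reset the port given by pid, or every port when pid == RTE_PORT_ALL,
 * through rte_eth_dev_reset(); on success the port is marked for full
 * reconfiguration on its next start.
 */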
1714 void
1715 reset_port(portid_t pid)
1716 {
1717         int diag;
1718         portid_t pi;
1719         struct rte_port *port;
1720
1721         if (port_id_is_invalid(pid, ENABLED_WARN))
1722                 return;
1723
1724         printf("Resetting ports...\n");
1725
1726         RTE_ETH_FOREACH_DEV(pi) {
1727                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1728                         continue;
1729
1730                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1731                         printf("Please remove port %d from forwarding "
1732                                "configuration.\n", pi);
1733                         continue;
1734                 }
1735
1736                 if (port_is_bonding_slave(pi)) {
1737                         printf("Please remove port %d from bonded device.\n",
1738                                pi);
1739                         continue;
1740                 }
1741
1742                 diag = rte_eth_dev_reset(pi);
1743                 if (diag == 0) {
1744                         port = &ports[pi];
1745                         port->need_reconfig = 1;
1746                         port->need_reconfig_queues = 1;
1747                 } else {
1748                         printf("Failed to reset port %d. diag=%d\n", pi, diag);
1749                 }
1750         }
1751
1752         printf("Done\n");
1753 }
1754
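/*
 * Hot-plug a new device described by "identifier" (a PCI address or a
 * virtual device string), reconfigure it and enable promiscuous mode.
 */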
1755 void
1756 attach_port(char *identifier)
1757 {
1758         portid_t pi = 0;
1759         unsigned int socket_id;
1760
1761         printf("Attaching a new port...\n");
1762
1763         if (identifier == NULL) {
1764                 printf("Invalid parameter specified\n");
1765                 return;
1766         }
1767
1768         if (rte_eth_dev_attach(identifier, &pi))
1769                 return;
1770
1771         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1772         /* if socket_id is invalid, set to 0 */
1773         if (check_socket_id(socket_id) < 0)
1774                 socket_id = 0;
1775         reconfig(pi, socket_id);
1776         rte_eth_promiscuous_enable(pi);
1777
1778         nb_ports = rte_eth_dev_count();
1779
1780         ports[pi].port_status = RTE_PORT_STOPPED;
1781
1782         printf("Port %d is attached. Total number of ports is now %d\n",
                        pi, nb_ports);
1783         printf("Done\n");
1784 }
1785
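/*
 * Hot-unplug the given port; the port must have been closed first so that
 * its resources are already released.
 */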
1786 void
1787 detach_port(uint8_t port_id)
1788 {
1789         char name[RTE_ETH_NAME_MAX_LEN];
1790
1791         printf("Detaching a port...\n");
1792
1793         if (!port_is_closed(port_id)) {
1794                 printf("Please close port first\n");
1795                 return;
1796         }
1797
1798         if (ports[port_id].flow_list)
1799                 port_flow_flush(port_id);
1800
1801         if (rte_eth_dev_detach(port_id, name)) {
1802                 RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1803                 return;
1804         }
1805
1806         nb_ports = rte_eth_dev_count();
1807
1808         printf("Port '%s' is detached. Total number of ports is now %d\n",
1809                         name, nb_ports);
1810         printf("Done\n");
1811         return;
1812 }
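/*
 * Illustrative hot-plug sequence (a sketch only, kept out of the build):
 * the helpers above are normally driven from the testpmd command line in
 * roughly this order.  The PCI address and port id used here are
 * placeholders, not values taken from a real setup.
 */
#if 0
static void
hotplug_example(void)
{
	portid_t pid = 0;

	attach_port("0000:81:00.0");	/* probe the device; assume it becomes port 0 */
	start_port(pid);		/* configure queues and start it */

	stop_port(pid);			/* quiesce traffic ... */
	close_port(pid);		/* ... release queues and flow rules ... */
	detach_port(pid);		/* ... then remove the device */
}
#endif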
1813
1814 void
1815 pmd_test_exit(void)
1816 {
1817         portid_t pt_id;
1818
1819         if (test_done == 0)
1820                 stop_packet_forwarding();
1821
1822         if (ports != NULL) {
1823                 no_link_check = 1;
1824                 RTE_ETH_FOREACH_DEV(pt_id) {
1825                         printf("\nShutting down port %d...\n", pt_id);
1826                         fflush(stdout);
1827                         stop_port(pt_id);
1828                         close_port(pt_id);
1829                 }
1830         }
1831         printf("\nBye...\n");
1832 }
1833
1834 typedef void (*cmd_func_t)(void);
1835 struct pmd_test_command {
1836         const char *cmd_name;
1837         cmd_func_t cmd_func;
1838 };
1839
1840 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1841
1842 /* Check the link status of all ports for up to 9 s, and print the final status */
1843 static void
1844 check_all_ports_link_status(uint32_t port_mask)
1845 {
1846 #define CHECK_INTERVAL 100 /* 100ms */
1847 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1848         portid_t portid;
1849         uint8_t count, all_ports_up, print_flag = 0;
1850         struct rte_eth_link link;
1851
1852         printf("Checking link statuses...\n");
1853         fflush(stdout);
1854         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1855                 all_ports_up = 1;
1856                 RTE_ETH_FOREACH_DEV(portid) {
1857                         if ((port_mask & (1 << portid)) == 0)
1858                                 continue;
1859                         memset(&link, 0, sizeof(link));
1860                         rte_eth_link_get_nowait(portid, &link);
1861                         /* print link status if flag set */
1862                         if (print_flag == 1) {
1863                                 if (link.link_status)
1864                                         printf(
1865                                         "Port %d Link Up. speed %u Mbps - %s\n",
1866                                         portid, link.link_speed,
1867                                         (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1868                                         ("full-duplex") : ("half-duplex"));
1869                                 else
1870                                         printf("Port %d Link Down\n", portid);
1871                                 continue;
1872                         }
1873                         /* clear all_ports_up flag if any link down */
1874                         if (link.link_status == ETH_LINK_DOWN) {
1875                                 all_ports_up = 0;
1876                                 break;
1877                         }
1878                 }
1879                 /* after finally printing all link status, get out */
1880                 if (print_flag == 1)
1881                         break;
1882
1883                 if (all_ports_up == 0) {
1884                         fflush(stdout);
1885                         rte_delay_ms(CHECK_INTERVAL);
1886                 }
1887
1888                 /* set the print_flag if all ports up or timeout */
1889                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1890                         print_flag = 1;
1891                 }
1892
1893                 if (lsc_interrupt)
1894                         break;
1895         }
1896 }
1897
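/*
 * Deferred handler for device-removal interrupts: scheduled through
 * rte_eal_alarm_set() by eth_event_callback() so that the port is stopped,
 * closed and detached outside of the interrupt callback itself.
 */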
1898 static void
1899 rmv_event_callback(void *arg)
1900 {
1901         struct rte_eth_dev *dev;
1902         uint8_t port_id = (intptr_t)arg;
1903
1904         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1905         dev = &rte_eth_devices[port_id];
1906
1907         stop_port(port_id);
1908         close_port(port_id);
1909         printf("removing device %s\n", dev->device->name);
1910         if (rte_eal_dev_detach(dev->device))
1911                 RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
1912                         dev->device->name);
1913 }
1914
1915 /* This function is used by the interrupt thread */
1916 static int
1917 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1918                   void *ret_param)
1919 {
1920         static const char * const event_desc[] = {
1921                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1922                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1923                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1924                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1925                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1926                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1927                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1928                 [RTE_ETH_EVENT_MAX] = NULL,
1929         };
1930
1931         RTE_SET_USED(param);
1932         RTE_SET_USED(ret_param);
1933
1934         if (type >= RTE_ETH_EVENT_MAX) {
1935                 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1936                         port_id, __func__, type);
1937                 fflush(stderr);
1938         } else if (event_print_mask & (UINT32_C(1) << type)) {
1939                 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1940                         event_desc[type]);
1941                 fflush(stdout);
1942         }
1943
1944         switch (type) {
1945         case RTE_ETH_EVENT_INTR_RMV:
1946                 if (rte_eal_alarm_set(100000,
1947                                 rmv_event_callback, (void *)(intptr_t)port_id))
1948                         fprintf(stderr, "Could not set up deferred device removal\n");
1949                 break;
1950         default:
1951                 break;
1952         }
1953         return 0;
1954 }
1955
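/*
 * Program the TX queue to statistics-counter mappings requested on the
 * command line for this port.  Returns 0 on success or the negative driver
 * error, e.g. -ENOTSUP when the PMD has no such mapping registers.
 */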
1956 static int
1957 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1958 {
1959         uint16_t i;
1960         int diag;
1961         uint8_t mapping_found = 0;
1962
1963         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1964                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1965                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1966                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1967                                         tx_queue_stats_mappings[i].queue_id,
1968                                         tx_queue_stats_mappings[i].stats_counter_id);
1969                         if (diag != 0)
1970                                 return diag;
1971                         mapping_found = 1;
1972                 }
1973         }
1974         if (mapping_found)
1975                 port->tx_queue_stats_mapping_enabled = 1;
1976         return 0;
1977 }
1978
1979 static int
1980 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1981 {
1982         uint16_t i;
1983         int diag;
1984         uint8_t mapping_found = 0;
1985
1986         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1987                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1988                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1989                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1990                                         rx_queue_stats_mappings[i].queue_id,
1991                                         rx_queue_stats_mappings[i].stats_counter_id);
1992                         if (diag != 0)
1993                                 return diag;
1994                         mapping_found = 1;
1995                 }
1996         }
1997         if (mapping_found)
1998                 port->rx_queue_stats_mapping_enabled = 1;
1999         return 0;
2000 }
2001
2002 static void
2003 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
2004 {
2005         int diag = 0;
2006
2007         diag = set_tx_queue_stats_mapping_registers(pi, port);
2008         if (diag != 0) {
2009                 if (diag == -ENOTSUP) {
2010                         port->tx_queue_stats_mapping_enabled = 0;
2011                         printf("TX queue stats mapping not supported on port id=%d\n", pi);
2012                 }
2013                 else
2014                         rte_exit(EXIT_FAILURE,
2015                                         "set_tx_queue_stats_mapping_registers "
2016                                         "failed for port id=%d diag=%d\n",
2017                                         pi, diag);
2018         }
2019
2020         diag = set_rx_queue_stats_mapping_registers(pi, port);
2021         if (diag != 0) {
2022                 if (diag == -ENOTSUP) {
2023                         port->rx_queue_stats_mapping_enabled = 0;
2024                         printf("RX queue stats mapping not supported on port id=%d\n", pi);
2025                 }
2026                 else
2027                         rte_exit(EXIT_FAILURE,
2028                                         "set_rx_queue_stats_mapping_registers "
2029                                         "failed for port id=%d diag=%d\n",
2030                                         pi, diag);
2031         }
2032 }
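/*
 * For illustration only (kept out of the build): one queue-stats mapping
 * entry as consumed by the helpers above, assuming the queue_stats_mappings
 * layout declared in testpmd.h.  Real entries are parsed from the
 * --tx-queue-stats-mapping / --rx-queue-stats-mapping options.
 */
#if 0
static const struct queue_stats_mappings example_tx_mapping = {
	.port_id = 0,		/* port the mapping applies to */
	.queue_id = 1,		/* TX queue on that port */
	.stats_counter_id = 3,	/* stats register the queue accumulates into */
};
#endif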
2033
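/*
 * Initialize the per-port RX/TX queue configuration from the driver
 * defaults reported in dev_info, then apply any thresholds overridden on
 * the command line (RTE_PMD_PARAM_UNSET means "keep the default").
 */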
2034 static void
2035 rxtx_port_config(struct rte_port *port)
2036 {
2037         port->rx_conf = port->dev_info.default_rxconf;
2038         port->tx_conf = port->dev_info.default_txconf;
2039
2040         /* Check if any RX/TX parameters have been passed */
2041         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2042                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2043
2044         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2045                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2046
2047         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2048                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2049
2050         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2051                 port->rx_conf.rx_free_thresh = rx_free_thresh;
2052
2053         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2054                 port->rx_conf.rx_drop_en = rx_drop_en;
2055
2056         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2057                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2058
2059         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2060                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2061
2062         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2063                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2064
2065         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2066                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2067
2068         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2069                 port->tx_conf.tx_free_thresh = tx_free_thresh;
2070
2071         if (txq_flags != RTE_PMD_PARAM_UNSET)
2072                 port->tx_conf.txq_flags = txq_flags;
2073 }
2074
2075 void
2076 init_port_config(void)
2077 {
2078         portid_t pid;
2079         struct rte_port *port;
2080
2081         RTE_ETH_FOREACH_DEV(pid) {
2082                 port = &ports[pid];
2083                 port->dev_conf.rxmode = rx_mode;
2084                 port->dev_conf.fdir_conf = fdir_conf;
2085                 if (nb_rxq > 1) {
2086                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2087                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2088                 } else {
2089                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2090                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2091                 }
2092
2093                 if (port->dcb_flag == 0) {
2094                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2095                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2096                         else
2097                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2098                 }
2099
2100                 rxtx_port_config(port);
2101
2102                 rte_eth_macaddr_get(pid, &port->eth_addr);
2103
2104                 map_port_queue_stats_mapping_registers(pid, port);
2105 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2106                 rte_pmd_ixgbe_bypass_init(pid);
2107 #endif
2108
2109                 if (lsc_interrupt &&
2110                     (rte_eth_devices[pid].data->dev_flags &
2111                      RTE_ETH_DEV_INTR_LSC))
2112                         port->dev_conf.intr_conf.lsc = 1;
2113                 if (rmv_interrupt &&
2114                     (rte_eth_devices[pid].data->dev_flags &
2115                      RTE_ETH_DEV_INTR_RMV))
2116                         port->dev_conf.intr_conf.rmv = 1;
2117
2118 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2119                 /* Detect softnic port */
2120                 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2121                         port->softnic_enable = 1;
2122                         memset(&port->softport, 0, sizeof(struct softnic_port));
2123
2124                         if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2125                                 port->softport.tm_flag = 1;
2126                 }
2127 #endif
2128         }
2129 }
2130
2131 void set_port_slave_flag(portid_t slave_pid)
2132 {
2133         struct rte_port *port;
2134
2135         port = &ports[slave_pid];
2136         port->slave_flag = 1;
2137 }
2138
2139 void clear_port_slave_flag(portid_t slave_pid)
2140 {
2141         struct rte_port *port;
2142
2143         port = &ports[slave_pid];
2144         port->slave_flag = 0;
2145 }
2146
2147 uint8_t port_is_bonding_slave(portid_t slave_pid)
2148 {
2149         struct rte_port *port;
2150
2151         port = &ports[slave_pid];
2152         return port->slave_flag;
2153 }
2154
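/* VLAN IDs used to build the VMDQ+DCB pool mapping and to seed the VLAN
 * filter when DCB mode is configured.
 */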
2155 const uint16_t vlan_tags[] = {
2156                 0,  1,  2,  3,  4,  5,  6,  7,
2157                 8,  9, 10, 11,  12, 13, 14, 15,
2158                 16, 17, 18, 19, 20, 21, 22, 23,
2159                 24, 25, 26, 27, 28, 29, 30, 31
2160 };
2161
2162 static int
2163 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2164                  enum dcb_mode_enable dcb_mode,
2165                  enum rte_eth_nb_tcs num_tcs,
2166                  uint8_t pfc_en)
2167 {
2168         uint8_t i;
2169
2170         /*
2171          * Build up the correct configuration for DCB+VT based on the VLAN tags
2172          * array given above, and the number of traffic classes available for use.
2173          */
2174         if (dcb_mode == DCB_VT_ENABLED) {
2175                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2176                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2177                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2178                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2179
2180                 /* VMDQ+DCB RX and TX configurations */
2181                 vmdq_rx_conf->enable_default_pool = 0;
2182                 vmdq_rx_conf->default_pool = 0;
2183                 vmdq_rx_conf->nb_queue_pools =
2184                         (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2185                 vmdq_tx_conf->nb_queue_pools =
2186                         (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2187
2188                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2189                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2190                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2191                         vmdq_rx_conf->pool_map[i].pools =
2192                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
2193                 }
2194                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2195                         vmdq_rx_conf->dcb_tc[i] = i;
2196                         vmdq_tx_conf->dcb_tc[i] = i;
2197                 }
2198
2199                 /* set DCB mode of RX and TX of multiple queues */
2200                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2201                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2202         } else {
2203                 struct rte_eth_dcb_rx_conf *rx_conf =
2204                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
2205                 struct rte_eth_dcb_tx_conf *tx_conf =
2206                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
2207
2208                 rx_conf->nb_tcs = num_tcs;
2209                 tx_conf->nb_tcs = num_tcs;
2210
2211                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2212                         rx_conf->dcb_tc[i] = i % num_tcs;
2213                         tx_conf->dcb_tc[i] = i % num_tcs;
2214                 }
2215                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2216                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2217                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2218         }
2219
2220         if (pfc_en)
2221                 eth_conf->dcb_capability_en =
2222                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2223         else
2224                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2225
2226         return 0;
2227 }
2228
2229 int
2230 init_port_dcb_config(portid_t pid,
2231                      enum dcb_mode_enable dcb_mode,
2232                      enum rte_eth_nb_tcs num_tcs,
2233                      uint8_t pfc_en)
2234 {
2235         struct rte_eth_conf port_conf;
2236         struct rte_port *rte_port;
2237         int retval;
2238         uint16_t i;
2239
2240         rte_port = &ports[pid];
2241
2242         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2243         /* Enter DCB configuration status */
2244         dcb_config = 1;
2245
2246         /* Set configuration of DCB in VT mode and DCB in non-VT mode */
2247         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2248         if (retval < 0)
2249                 return retval;
2250         port_conf.rxmode.hw_vlan_filter = 1;
2251
2252         /**
2253          * Write the configuration into the device.
2254          * Set the numbers of RX & TX queues to 0, so
2255          * the RX & TX queues will not be setup.
2256          * the RX & TX queues will not be set up.
2257         rte_eth_dev_configure(pid, 0, 0, &port_conf);
2258
2259         rte_eth_dev_info_get(pid, &rte_port->dev_info);
2260
2261         /* If dev_info.vmdq_pool_base is greater than 0,
2262          * the queue id of vmdq pools is started after pf queues.
2263          */
2264         if (dcb_mode == DCB_VT_ENABLED &&
2265             rte_port->dev_info.vmdq_pool_base > 0) {
2266                 printf("VMDQ_DCB multi-queue mode is nonsensical"
2267                         " for port %d.\n", pid);
2268                 return -1;
2269         }
2270
2271         /* Assume the ports in testpmd have the same DCB capability
2272          * and the same number of rxq and txq in DCB mode
2273          */
2274         if (dcb_mode == DCB_VT_ENABLED) {
2275                 if (rte_port->dev_info.max_vfs > 0) {
2276                         nb_rxq = rte_port->dev_info.nb_rx_queues;
2277                         nb_txq = rte_port->dev_info.nb_tx_queues;
2278                 } else {
2279                         nb_rxq = rte_port->dev_info.max_rx_queues;
2280                         nb_txq = rte_port->dev_info.max_tx_queues;
2281                 }
2282         } else {
2283                 /* If VT is disabled, use all PF queues */
2284                 if (rte_port->dev_info.vmdq_pool_base == 0) {
2285                         nb_rxq = rte_port->dev_info.max_rx_queues;
2286                         nb_txq = rte_port->dev_info.max_tx_queues;
2287                 } else {
2288                         nb_rxq = (queueid_t)num_tcs;
2289                         nb_txq = (queueid_t)num_tcs;
2290
2291                 }
2292         }
2293         rx_free_thresh = 64;
2294
2295         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2296
2297         rxtx_port_config(rte_port);
2298         /* VLAN filter */
2299         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2300         for (i = 0; i < RTE_DIM(vlan_tags); i++)
2301                 rx_vft_set(pid, vlan_tags[i], 1);
2302
2303         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2304         map_port_queue_stats_mapping_registers(pid, rte_port);
2305
2306         rte_port->dcb_flag = 1;
2307
2308         return 0;
2309 }
2310
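/*
 * Allocate the global ports[] array (one entry per possible ethdev port)
 * from the EAL heap; rte_zmalloc() returns it zeroed and cache-line
 * aligned.
 */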
2311 static void
2312 init_port(void)
2313 {
2314         /* Configuration of Ethernet ports. */
2315         ports = rte_zmalloc("testpmd: ports",
2316                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2317                             RTE_CACHE_LINE_SIZE);
2318         if (ports == NULL) {
2319                 rte_exit(EXIT_FAILURE,
2320                                 "rte_zmalloc(%d struct rte_port) failed\n",
2321                                 RTE_MAX_ETHPORTS);
2322         }
2323 }
2324
2325 static void
2326 force_quit(void)
2327 {
2328         pmd_test_exit();
2329         prompt_exit();
2330 }
2331
2332 static void
2333 print_stats(void)
2334 {
2335         uint8_t i;
2336         const char clr[] = { 27, '[', '2', 'J', '\0' };
2337         const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2338
2339         /* Clear screen and move to top left */
2340         printf("%s%s", clr, top_left);
2341
2342         printf("\nPort statistics ====================================");
2343         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2344                 nic_stats_display(fwd_ports_ids[i]);
2345 }
2346
2347 static void
2348 signal_handler(int signum)
2349 {
2350         if (signum == SIGINT || signum == SIGTERM) {
2351                 printf("\nSignal %d received, preparing to exit...\n",
2352                                 signum);
2353 #ifdef RTE_LIBRTE_PDUMP
2354                 /* uninitialize packet capture framework */
2355                 rte_pdump_uninit();
2356 #endif
2357 #ifdef RTE_LIBRTE_LATENCY_STATS
2358                 rte_latencystats_uninit();
2359 #endif
2360                 force_quit();
2361                 /* Set flag to indicate forced termination. */
2362                 f_quit = 1;
2363                 /* exit with the expected status */
2364                 signal(signum, SIG_DFL);
2365                 kill(getpid(), signum);
2366         }
2367 }
2368
2369 int
2370 main(int argc, char** argv)
2371 {
2372         int  diag;
2373         portid_t port_id;
2374
2375         signal(SIGINT, signal_handler);
2376         signal(SIGTERM, signal_handler);
2377
2378         diag = rte_eal_init(argc, argv);
2379         if (diag < 0)
2380                 rte_panic("Cannot init EAL\n");
2381
2382         if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2383                 RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
2384                         strerror(errno));
2385         }
2386
2387 #ifdef RTE_LIBRTE_PDUMP
2388         /* initialize packet capture framework */
2389         rte_pdump_init(NULL);
2390 #endif
2391
2392         nb_ports = (portid_t) rte_eth_dev_count();
2393         if (nb_ports == 0)
2394                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2395
2396         /* allocate port structures, and init them */
2397         init_port();
2398
2399         set_def_fwd_config();
2400         if (nb_lcores == 0)
2401                 rte_panic("Empty set of forwarding logical cores - check the "
2402                           "core mask supplied in the command parameters\n");
2403
2404         /* Bitrate/latency stats disabled by default */
2405 #ifdef RTE_LIBRTE_BITRATE
2406         bitrate_enabled = 0;
2407 #endif
2408 #ifdef RTE_LIBRTE_LATENCY_STATS
2409         latencystats_enabled = 0;
2410 #endif
2411
2412         argc -= diag;
2413         argv += diag;
2414         if (argc > 1)
2415                 launch_args_parse(argc, argv);
2416
2417         if (tx_first && interactive)
2418                 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2419                                 "interactive mode.\n");
2420
2421         if (tx_first && lsc_interrupt) {
2422                 printf("Warning: lsc_interrupt needs to be off when "
2423                                 "using tx_first. Disabling.\n");
2424                 lsc_interrupt = 0;
2425         }
2426
2427         if (!nb_rxq && !nb_txq)
2428                 printf("Warning: Either rx or tx queues should be non-zero\n");
2429
2430         if (nb_rxq > 1 && nb_rxq > nb_txq)
2431                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2432                        "but nb_txq=%d will prevent it from being fully tested.\n",
2433                        nb_rxq, nb_txq);
2434
2435         init_config();
2436         if (start_port(RTE_PORT_ALL) != 0)
2437                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2438
2439         /* set all ports to promiscuous mode by default */
2440         RTE_ETH_FOREACH_DEV(port_id)
2441                 rte_eth_promiscuous_enable(port_id);
2442
2443         /* Init metrics library */
2444         rte_metrics_init(rte_socket_id());
2445
2446 #ifdef RTE_LIBRTE_LATENCY_STATS
2447         if (latencystats_enabled != 0) {
2448                 int ret = rte_latencystats_init(1, NULL);
2449                 if (ret)
2450                         printf("Warning: latencystats init()"
2451                                 " returned error %d\n", ret);
2452                 printf("Latencystats running on lcore %d\n",
2453                         latencystats_lcore_id);
2454         }
2455 #endif
2456
2457         /* Setup bitrate stats */
2458 #ifdef RTE_LIBRTE_BITRATE
2459         if (bitrate_enabled != 0) {
2460                 bitrate_data = rte_stats_bitrate_create();
2461                 if (bitrate_data == NULL)
2462                         rte_exit(EXIT_FAILURE,
2463                                 "Could not allocate bitrate data.\n");
2464                 rte_stats_bitrate_reg(bitrate_data);
2465         }
2466 #endif
2467
2468 #ifdef RTE_LIBRTE_CMDLINE
2469         if (strlen(cmdline_filename) != 0)
2470                 cmdline_read_from_file(cmdline_filename);
2471
2472         if (interactive == 1) {
2473                 if (auto_start) {
2474                         printf("Start automatic packet forwarding\n");
2475                         start_packet_forwarding(0);
2476                 }
2477                 prompt();
2478                 pmd_test_exit();
2479         } else
2480 #endif
2481         {
2482                 char c;
2483                 int rc;
2484
2485                 f_quit = 0;
2486
2487                 printf("No commandline given, starting packet forwarding\n");
2488                 start_packet_forwarding(tx_first);
2489                 if (stats_period != 0) {
2490                         uint64_t prev_time = 0, cur_time, diff_time = 0;
2491                         uint64_t timer_period;
2492
2493                         /* Convert to number of cycles */
2494                         timer_period = stats_period * rte_get_timer_hz();
2495
2496                         while (f_quit == 0) {
2497                                 cur_time = rte_get_timer_cycles();
2498                                 diff_time += cur_time - prev_time;
2499
2500                                 if (diff_time >= timer_period) {
2501                                         print_stats();
2502                                         /* Reset the timer */
2503                                         diff_time = 0;
2504                                 }
2505                                 /* Sleep to avoid unnecessary checks */
2506                                 prev_time = cur_time;
2507                                 sleep(1);
2508                         }
2509                 }
2510
2511                 printf("Press enter to exit\n");
2512                 rc = read(0, &c, 1);
2513                 pmd_test_exit();
2514                 if (rc < 0)
2515                         return 1;
2516         }
2517
2518         return 0;
2519 }