app/testpmd: add command line option for Tx offloads
[dpdk.git] / app / test-pmd / testpmd.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <errno.h>
15
16 #include <sys/queue.h>
17 #include <sys/stat.h>
18
19 #include <stdint.h>
20 #include <unistd.h>
21 #include <inttypes.h>
22
23 #include <rte_common.h>
24 #include <rte_errno.h>
25 #include <rte_byteorder.h>
26 #include <rte_log.h>
27 #include <rte_debug.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
30 #include <rte_memcpy.h>
31 #include <rte_launch.h>
32 #include <rte_eal.h>
33 #include <rte_alarm.h>
34 #include <rte_per_lcore.h>
35 #include <rte_lcore.h>
36 #include <rte_atomic.h>
37 #include <rte_branch_prediction.h>
38 #include <rte_mempool.h>
39 #include <rte_malloc.h>
40 #include <rte_mbuf.h>
41 #include <rte_interrupts.h>
42 #include <rte_pci.h>
43 #include <rte_ether.h>
44 #include <rte_ethdev.h>
45 #include <rte_dev.h>
46 #include <rte_string_fns.h>
47 #ifdef RTE_LIBRTE_IXGBE_PMD
48 #include <rte_pmd_ixgbe.h>
49 #endif
50 #ifdef RTE_LIBRTE_PDUMP
51 #include <rte_pdump.h>
52 #endif
53 #include <rte_flow.h>
54 #include <rte_metrics.h>
55 #ifdef RTE_LIBRTE_BITRATE
56 #include <rte_bitrate.h>
57 #endif
58 #ifdef RTE_LIBRTE_LATENCY_STATS
59 #include <rte_latencystats.h>
60 #endif
61
62 #include "testpmd.h"
63
64 uint16_t verbose_level = 0; /**< Silent by default. */
65 int testpmd_logtype; /**< Log type for testpmd logs */
66
67 /* use master core for command line ? */
68 uint8_t interactive = 0;
69 uint8_t auto_start = 0;
70 uint8_t tx_first;
71 char cmdline_filename[PATH_MAX] = {0};
72
73 /*
74  * NUMA support configuration.
75  * When set, NUMA support dispatches the allocation of the RX and TX memory
76  * rings, and of the DMA memory buffers (mbufs) for the probed ports, among
77  * the detected CPU sockets.
78  * Otherwise, all memory is allocated from CPU socket 0.
79  */
80 uint8_t numa_support = 1; /**< numa enabled by default */
81
82 /*
83  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
84  * not configured.
85  */
86 uint8_t socket_num = UMA_NO_CONFIG;
87
88 /*
89  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
90  */
91 uint8_t mp_anon = 0;
92
93 /*
94  * Record the Ethernet addresses of the peer target ports to which packets
95  * are forwarded.
96  * Must be instantiated with the Ethernet addresses of the peer traffic
97  * generator ports.
98  */
99 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
100 portid_t nb_peer_eth_addrs = 0;
101
102 /*
103  * Probed Target Environment.
104  */
105 struct rte_port *ports;        /**< For all probed ethernet ports. */
106 portid_t nb_ports;             /**< Number of probed ethernet ports. */
107 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
108 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
109
110 /*
111  * Test Forwarding Configuration.
112  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
113  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
114  */
115 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
116 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
117 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
118 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
119
120 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
121 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
122
123 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
124 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
125
126 /*
127  * Forwarding engines.
128  */
129 struct fwd_engine * fwd_engines[] = {
130         &io_fwd_engine,
131         &mac_fwd_engine,
132         &mac_swap_engine,
133         &flow_gen_engine,
134         &rx_only_engine,
135         &tx_only_engine,
136         &csum_fwd_engine,
137         &icmp_echo_engine,
138 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
139         &softnic_tm_engine,
140         &softnic_tm_bypass_engine,
141 #endif
142 #ifdef RTE_LIBRTE_IEEE1588
143         &ieee1588_fwd_engine,
144 #endif
145         NULL,
146 };
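
/*
 * Illustrative sketch: a forwarding engine can be looked up by name in the
 * NULL-terminated fwd_engines[] array above through its fwd_mode_name member,
 * which is referenced elsewhere in this file. The real selection logic lives
 * in the testpmd configuration code; the helper name below is an assumption
 * used only as an example of how the array is meant to be walked.
 */
static struct fwd_engine *
example_find_fwd_engine(const char *fwd_mode_name)
{
        unsigned int i;

        for (i = 0; fwd_engines[i] != NULL; i++) {
                if (strcmp(fwd_engines[i]->fwd_mode_name, fwd_mode_name) == 0)
                        return fwd_engines[i];
        }
        return NULL; /* unknown forwarding mode name */
}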
147
148 struct fwd_config cur_fwd_config;
149 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
150 uint32_t retry_enabled;
151 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
152 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
153
154 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
155 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
156                                       * specified on command-line. */
157 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
158
159 /*
160  * In a container, a process running with the 'stats-period' option cannot be
161  * terminated. Set a flag to exit the stats-period loop on SIGINT/SIGTERM.
162  */
163 uint8_t f_quit;
164
165 /*
166  * Configuration of packet segments used by the "txonly" processing engine.
167  */
168 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
169 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
170         TXONLY_DEF_PACKET_LEN,
171 };
172 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
173
174 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
175 /**< Split policy for packets to TX. */
176
177 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
178 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
179
180 /* Whether the current configuration is in DCB mode; 0 means it is not. */
181 uint8_t dcb_config = 0;
182
183 /* Whether DCB is in testing status */
184 uint8_t dcb_test = 0;
185
186 /*
187  * Configurable number of RX/TX queues.
188  */
189 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
190 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
191
192 /*
193  * Configurable number of RX/TX ring descriptors.
194  */
195 #define RTE_TEST_RX_DESC_DEFAULT 128
196 #define RTE_TEST_TX_DESC_DEFAULT 512
197 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
198 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
199
200 #define RTE_PMD_PARAM_UNSET -1
201 /*
202  * Configurable values of RX and TX ring threshold registers.
203  */
204
205 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
206 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
207 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
208
209 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
210 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
211 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
212
213 /*
214  * Configurable value of RX free threshold.
215  */
216 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
217
218 /*
219  * Configurable value of RX drop enable.
220  */
221 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
222
223 /*
224  * Configurable value of TX free threshold.
225  */
226 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
227
228 /*
229  * Configurable value of TX RS bit threshold.
230  */
231 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
232
233 /*
234  * Configurable value of TX queue flags.
235  */
236 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
237
238 /*
239  * Receive Side Scaling (RSS) configuration.
240  */
241 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
242
243 /*
244  * Port topology configuration
245  */
246 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
247
248 /*
249  * Avoid flushing all the RX streams before starting forwarding.
250  */
251 uint8_t no_flush_rx = 0; /* flush by default */
252
253 /*
254  * Flow API isolated mode.
255  */
256 uint8_t flow_isolate_all;
257
258 /*
259  * Avoid checking the link status when starting/stopping a port.
260  */
261 uint8_t no_link_check = 0; /* check by default */
262
263 /*
264  * Enable link status change notification
265  */
266 uint8_t lsc_interrupt = 1; /* enabled by default */
267
268 /*
269  * Enable device removal notification.
270  */
271 uint8_t rmv_interrupt = 1; /* enabled by default */
272
273 /*
274  * Display or mask ether events
275  * Default to all events except VF_MBOX
276  */
277 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
278                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
279                             (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
280                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
281                             (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
282                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
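
/*
 * Illustrative sketch: before printing a port event, its type can be tested
 * against event_print_mask as below. This mirrors what the event callback
 * declared later in this file is expected to do; the helper itself is only
 * an example and not part of the event handling path.
 */
static int
example_event_is_displayed(enum rte_eth_event_type type)
{
        return (event_print_mask & (UINT32_C(1) << type)) != 0;
}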
283
284 /*
285  * NIC bypass mode configuration options.
286  */
287
288 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
289 /* The NIC bypass watchdog timeout. */
290 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
291 #endif
292
293
294 #ifdef RTE_LIBRTE_LATENCY_STATS
295
296 /*
297  * Set when latency stats are enabled on the command line
298  */
299 uint8_t latencystats_enabled;
300
301 /*
302  * Lcore ID that serves latency statistics.
303  */
304 lcoreid_t latencystats_lcore_id = -1;
305
306 #endif
307
308 /*
309  * Ethernet device configuration.
310  */
311 struct rte_eth_rxmode rx_mode = {
312         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
313         .offloads = (DEV_RX_OFFLOAD_VLAN_FILTER |
314                      DEV_RX_OFFLOAD_VLAN_STRIP |
315                      DEV_RX_OFFLOAD_CRC_STRIP),
316         .ignore_offload_bitfield = 1,
317 };
318
319 struct rte_eth_txmode tx_mode;
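
/*
 * Illustrative sketch only: the command-line option referred to by this patch
 * is expected to fill tx_mode.offloads with a DEV_TX_OFFLOAD_* bit-mask before
 * init_config() copies tx_mode into each port's dev_conf.txmode, from where
 * start_port() later propagates it into tx_conf. The helper name below is an
 * assumption for illustration; the actual parsing lives in the testpmd
 * parameters code, not in this file.
 */
static void
example_set_tx_offloads(uint64_t tx_offloads)
{
        /* e.g. tx_offloads = DEV_TX_OFFLOAD_TCP_TSO */
        tx_mode.offloads = tx_offloads;
}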
320
321 struct rte_fdir_conf fdir_conf = {
322         .mode = RTE_FDIR_MODE_NONE,
323         .pballoc = RTE_FDIR_PBALLOC_64K,
324         .status = RTE_FDIR_REPORT_STATUS,
325         .mask = {
326                 .vlan_tci_mask = 0x0,
327                 .ipv4_mask     = {
328                         .src_ip = 0xFFFFFFFF,
329                         .dst_ip = 0xFFFFFFFF,
330                 },
331                 .ipv6_mask     = {
332                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
333                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
334                 },
335                 .src_port_mask = 0xFFFF,
336                 .dst_port_mask = 0xFFFF,
337                 .mac_addr_byte_mask = 0xFF,
338                 .tunnel_type_mask = 1,
339                 .tunnel_id_mask = 0xFFFFFFFF,
340         },
341         .drop_queue = 127,
342 };
343
344 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
345
346 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
347 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
348
349 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
350 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
351
352 uint16_t nb_tx_queue_stats_mappings = 0;
353 uint16_t nb_rx_queue_stats_mappings = 0;
354
355 /*
356  * Hide zero values in xstats display when set; zero values are shown by default.
357  */
358 uint8_t xstats_hide_zero;
359
360 unsigned int num_sockets = 0;
361 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
362
363 #ifdef RTE_LIBRTE_BITRATE
364 /* Bitrate statistics */
365 struct rte_stats_bitrates *bitrate_data;
366 lcoreid_t bitrate_lcore_id;
367 uint8_t bitrate_enabled;
368 #endif
369
370 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
371 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
372
373 /* Forward function declarations */
374 static void map_port_queue_stats_mapping_registers(portid_t pi,
375                                                    struct rte_port *port);
376 static void check_all_ports_link_status(uint32_t port_mask);
377 static int eth_event_callback(portid_t port_id,
378                               enum rte_eth_event_type type,
379                               void *param, void *ret_param);
380
381 /*
382  * Check if all the ports are started.
383  * If yes, return positive value. If not, return zero.
384  */
385 static int all_ports_started(void);
386
387 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
388 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
389
390 /*
391  * Helper function to check whether a socket id is new (not yet discovered).
392  * If it is new, return a positive value; otherwise return zero.
393  */
394 int
395 new_socket_id(unsigned int socket_id)
396 {
397         unsigned int i;
398
399         for (i = 0; i < num_sockets; i++) {
400                 if (socket_ids[i] == socket_id)
401                         return 0;
402         }
403         return 1;
404 }
405
406 /*
407  * Setup default configuration.
408  */
409 static void
410 set_default_fwd_lcores_config(void)
411 {
412         unsigned int i;
413         unsigned int nb_lc;
414         unsigned int sock_num;
415
416         nb_lc = 0;
417         for (i = 0; i < RTE_MAX_LCORE; i++) {
418                 sock_num = rte_lcore_to_socket_id(i);
419                 if (new_socket_id(sock_num)) {
420                         if (num_sockets >= RTE_MAX_NUMA_NODES) {
421                                 rte_exit(EXIT_FAILURE,
422                                          "Total sockets greater than %u\n",
423                                          RTE_MAX_NUMA_NODES);
424                         }
425                         socket_ids[num_sockets++] = sock_num;
426                 }
427                 if (!rte_lcore_is_enabled(i))
428                         continue;
429                 if (i == rte_get_master_lcore())
430                         continue;
431                 fwd_lcores_cpuids[nb_lc++] = i;
432         }
433         nb_lcores = (lcoreid_t) nb_lc;
434         nb_cfg_lcores = nb_lcores;
435         nb_fwd_lcores = 1;
436 }
437
438 static void
439 set_def_peer_eth_addrs(void)
440 {
441         portid_t i;
442
443         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
444                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
445                 peer_eth_addrs[i].addr_bytes[5] = i;
446         }
447 }
448
449 static void
450 set_default_fwd_ports_config(void)
451 {
452         portid_t pt_id;
453         int i = 0;
454
455         RTE_ETH_FOREACH_DEV(pt_id)
456                 fwd_ports_ids[i++] = pt_id;
457
458         nb_cfg_ports = nb_ports;
459         nb_fwd_ports = nb_ports;
460 }
461
462 void
463 set_def_fwd_config(void)
464 {
465         set_default_fwd_lcores_config();
466         set_def_peer_eth_addrs();
467         set_default_fwd_ports_config();
468 }
469
470 /*
471  * Configuration initialisation done once at init time.
472  */
473 static void
474 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
475                  unsigned int socket_id)
476 {
477         char pool_name[RTE_MEMPOOL_NAMESIZE];
478         struct rte_mempool *rte_mp = NULL;
479         uint32_t mb_size;
480
481         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
482         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
483
484         TESTPMD_LOG(INFO,
485                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
486                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
487
488         if (mp_anon != 0) {
489                 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
490                         mb_size, (unsigned) mb_mempool_cache,
491                         sizeof(struct rte_pktmbuf_pool_private),
492                         socket_id, 0);
493                 if (rte_mp == NULL)
494                         goto err;
495
496                 if (rte_mempool_populate_anon(rte_mp) == 0) {
497                         rte_mempool_free(rte_mp);
498                         rte_mp = NULL;
499                         goto err;
500                 }
501                 rte_pktmbuf_pool_init(rte_mp, NULL);
502                 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
503         } else {
504                 /* wrapper to rte_mempool_create() */
505                 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
506                         mb_mempool_cache, 0, mbuf_seg_size, socket_id);
507         }
508
509 err:
510         if (rte_mp == NULL) {
511                 rte_exit(EXIT_FAILURE,
512                         "Creation of mbuf pool for socket %u failed: %s\n",
513                         socket_id, rte_strerror(rte_errno));
514         } else if (verbose_level > 0) {
515                 rte_mempool_dump(stdout, rte_mp);
516         }
517 }
518
519 /*
520  * Check whether the given socket id is valid in NUMA mode;
521  * if valid, return 0, otherwise return -1.
522  */
523 static int
524 check_socket_id(const unsigned int socket_id)
525 {
526         static int warning_once = 0;
527
528         if (new_socket_id(socket_id)) {
529                 if (!warning_once && numa_support)
530                         printf("Warning: NUMA should be configured manually by"
531                                " using --port-numa-config and"
532                                " --ring-numa-config parameters along with"
533                                " --numa.\n");
534                 warning_once = 1;
535                 return -1;
536         }
537         return 0;
538 }
539
540 static void
541 init_config(void)
542 {
543         portid_t pid;
544         struct rte_port *port;
545         struct rte_mempool *mbp;
546         unsigned int nb_mbuf_per_pool;
547         lcoreid_t  lc_id;
548         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
549         struct rte_gro_param gro_param;
550         uint32_t gso_types;
551
552         memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
553
554         if (numa_support) {
555                 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
556                 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
557                 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
558         }
559
560         /* Configuration of logical cores. */
561         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
562                                 sizeof(struct fwd_lcore *) * nb_lcores,
563                                 RTE_CACHE_LINE_SIZE);
564         if (fwd_lcores == NULL) {
565                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
566                                                         "failed\n", nb_lcores);
567         }
568         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
569                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
570                                                sizeof(struct fwd_lcore),
571                                                RTE_CACHE_LINE_SIZE);
572                 if (fwd_lcores[lc_id] == NULL) {
573                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
574                                                                 "failed\n");
575                 }
576                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
577         }
578
579         RTE_ETH_FOREACH_DEV(pid) {
580                 port = &ports[pid];
581                 /* Apply default Tx configuration for all ports */
582                 port->dev_conf.txmode = tx_mode;
583                 rte_eth_dev_info_get(pid, &port->dev_info);
584
585                 if (numa_support) {
586                         if (port_numa[pid] != NUMA_NO_CONFIG)
587                                 port_per_socket[port_numa[pid]]++;
588                         else {
589                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
590
591                                 /* if socket_id is invalid, set to 0 */
592                                 if (check_socket_id(socket_id) < 0)
593                                         socket_id = 0;
594                                 port_per_socket[socket_id]++;
595                         }
596                 }
597
598                 /* set flag to initialize port/queue */
599                 port->need_reconfig = 1;
600                 port->need_reconfig_queues = 1;
601         }
602
603         /*
604          * Create mbuf pools.
605          * If NUMA support is disabled, create a single mbuf pool in
606          * socket 0 memory by default.
607          * Otherwise, create an mbuf pool in the memory of each detected socket.
608          *
609          * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd and
610          * nb_txd can still be re-configured at run time.
611          */
612         if (param_total_num_mbufs)
613                 nb_mbuf_per_pool = param_total_num_mbufs;
614         else {
615                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
616                         (nb_lcores * mb_mempool_cache) +
617                         RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
618                 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
619         }
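        /*
         * Worked example (constants assumed from testpmd.h, for illustration
         * only): with RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
         * MAX_PKT_BURST = 512, mb_mempool_cache = 250 and 4 lcores, the
         * computation above sizes each pool at
         * (2048 + 4 * 250 + 2048 + 512) * RTE_MAX_ETHPORTS
         * = 5608 * RTE_MAX_ETHPORTS mbufs.
         */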
620
621         if (numa_support) {
622                 uint8_t i;
623
624                 for (i = 0; i < num_sockets; i++)
625                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
626                                          socket_ids[i]);
627         } else {
628                 if (socket_num == UMA_NO_CONFIG)
629                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
630                 else
631                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
632                                                  socket_num);
633         }
634
635         init_port_config();
636
637         gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
638                 DEV_TX_OFFLOAD_GRE_TNL_TSO;
639         /*
640          * Record which mbuf pool each logical core should use, if needed.
641          */
642         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
643                 mbp = mbuf_pool_find(
644                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
645
646                 if (mbp == NULL)
647                         mbp = mbuf_pool_find(0);
648                 fwd_lcores[lc_id]->mbp = mbp;
649                 /* initialize GSO context */
650                 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
651                 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
652                 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
653                 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
654                         ETHER_CRC_LEN;
655                 fwd_lcores[lc_id]->gso_ctx.flag = 0;
656         }
657
658         /* Configuration of packet forwarding streams. */
659         if (init_fwd_streams() < 0)
660                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
661
662         fwd_config_setup();
663
664         /* create a gro context for each lcore */
665         gro_param.gro_types = RTE_GRO_TCP_IPV4;
666         gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
667         gro_param.max_item_per_flow = MAX_PKT_BURST;
668         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
669                 gro_param.socket_id = rte_lcore_to_socket_id(
670                                 fwd_lcores_cpuids[lc_id]);
671                 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
672                 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
673                         rte_exit(EXIT_FAILURE,
674                                         "rte_gro_ctx_create() failed\n");
675                 }
676         }
677 }
678
679
680 void
681 reconfig(portid_t new_port_id, unsigned socket_id)
682 {
683         struct rte_port *port;
684
685         /* Reconfiguration of Ethernet ports. */
686         port = &ports[new_port_id];
687         rte_eth_dev_info_get(new_port_id, &port->dev_info);
688
689         /* set flag to initialize port/queue */
690         port->need_reconfig = 1;
691         port->need_reconfig_queues = 1;
692         port->socket_id = socket_id;
693
694         init_port_config();
695 }
696
697
698 int
699 init_fwd_streams(void)
700 {
701         portid_t pid;
702         struct rte_port *port;
703         streamid_t sm_id, nb_fwd_streams_new;
704         queueid_t q;
705
706         /* set socket id according to NUMA mode */
707         RTE_ETH_FOREACH_DEV(pid) {
708                 port = &ports[pid];
709                 if (nb_rxq > port->dev_info.max_rx_queues) {
710                         printf("Fail: nb_rxq(%d) is greater than "
711                                 "max_rx_queues(%d)\n", nb_rxq,
712                                 port->dev_info.max_rx_queues);
713                         return -1;
714                 }
715                 if (nb_txq > port->dev_info.max_tx_queues) {
716                         printf("Fail: nb_txq(%d) is greater than "
717                                 "max_tx_queues(%d)\n", nb_txq,
718                                 port->dev_info.max_tx_queues);
719                         return -1;
720                 }
721                 if (numa_support) {
722                         if (port_numa[pid] != NUMA_NO_CONFIG)
723                                 port->socket_id = port_numa[pid];
724                         else {
725                                 port->socket_id = rte_eth_dev_socket_id(pid);
726
727                                 /* if socket_id is invalid, set to 0 */
728                                 if (check_socket_id(port->socket_id) < 0)
729                                         port->socket_id = 0;
730                         }
731                 }
732                 else {
733                         if (socket_num == UMA_NO_CONFIG)
734                                 port->socket_id = 0;
735                         else
736                                 port->socket_id = socket_num;
737                 }
738         }
739
740         q = RTE_MAX(nb_rxq, nb_txq);
741         if (q == 0) {
742                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
743                 return -1;
744         }
745         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
746         if (nb_fwd_streams_new == nb_fwd_streams)
747                 return 0;
748         /* clear the old */
749         if (fwd_streams != NULL) {
750                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
751                         if (fwd_streams[sm_id] == NULL)
752                                 continue;
753                         rte_free(fwd_streams[sm_id]);
754                         fwd_streams[sm_id] = NULL;
755                 }
756                 rte_free(fwd_streams);
757                 fwd_streams = NULL;
758         }
759
760         /* init new */
761         nb_fwd_streams = nb_fwd_streams_new;
762         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
763                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
764         if (fwd_streams == NULL)
765                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
766                                                 "failed\n", nb_fwd_streams);
767
768         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
769                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
770                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
771                 if (fwd_streams[sm_id] == NULL)
772                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
773                                                                 " failed\n");
774         }
775
776         return 0;
777 }
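
/*
 * Worked example: with 2 forwarding ports, nb_rxq = 4 and nb_txq = 2,
 * init_fwd_streams() above allocates
 * nb_fwd_streams = 2 * RTE_MAX(4, 2) = 8 forwarding streams.
 */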
778
779 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
780 static void
781 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
782 {
783         unsigned int total_burst;
784         unsigned int nb_burst;
785         unsigned int burst_stats[3];
786         uint16_t pktnb_stats[3];
787         uint16_t nb_pkt;
788         int burst_percent[3];
789
790         /*
791          * First compute the total number of packet bursts and the
792          * two highest numbers of bursts of the same number of packets.
793          */
794         total_burst = 0;
795         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
796         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
797         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
798                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
799                 if (nb_burst == 0)
800                         continue;
801                 total_burst += nb_burst;
802                 if (nb_burst > burst_stats[0]) {
803                         burst_stats[1] = burst_stats[0];
804                         pktnb_stats[1] = pktnb_stats[0];
805                         burst_stats[0] = nb_burst;
806                         pktnb_stats[0] = nb_pkt;
807                 }
808         }
809         if (total_burst == 0)
810                 return;
811         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
812         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
813                burst_percent[0], (int) pktnb_stats[0]);
814         if (burst_stats[0] == total_burst) {
815                 printf("]\n");
816                 return;
817         }
818         if (burst_stats[0] + burst_stats[1] == total_burst) {
819                 printf(" + %d%% of %d pkts]\n",
820                        100 - burst_percent[0], pktnb_stats[1]);
821                 return;
822         }
823         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
824         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
825         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
826                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
827                 return;
828         }
829         printf(" + %d%% of %d pkts + %d%% of others]\n",
830                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
831 }
832 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
833
834 static void
835 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
836 {
837         struct rte_port *port;
838         uint8_t i;
839
840         static const char *fwd_stats_border = "----------------------";
841
842         port = &ports[port_id];
843         printf("\n  %s Forward statistics for port %-2d %s\n",
844                fwd_stats_border, port_id, fwd_stats_border);
845
846         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
847                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
848                        "%-"PRIu64"\n",
849                        stats->ipackets, stats->imissed,
850                        (uint64_t) (stats->ipackets + stats->imissed));
851
852                 if (cur_fwd_eng == &csum_fwd_engine)
853                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
854                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
855                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
856                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
857                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
858                 }
859
860                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
861                        "%-"PRIu64"\n",
862                        stats->opackets, port->tx_dropped,
863                        (uint64_t) (stats->opackets + port->tx_dropped));
864         }
865         else {
866                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
867                        "%14"PRIu64"\n",
868                        stats->ipackets, stats->imissed,
869                        (uint64_t) (stats->ipackets + stats->imissed));
870
871                 if (cur_fwd_eng == &csum_fwd_engine)
872                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
873                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
874                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
875                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
876                         printf("  RX-nombufs:             %14"PRIu64"\n",
877                                stats->rx_nombuf);
878                 }
879
880                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
881                        "%14"PRIu64"\n",
882                        stats->opackets, port->tx_dropped,
883                        (uint64_t) (stats->opackets + port->tx_dropped));
884         }
885
886 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
887         if (port->rx_stream)
888                 pkt_burst_stats_display("RX",
889                         &port->rx_stream->rx_burst_stats);
890         if (port->tx_stream)
891                 pkt_burst_stats_display("TX",
892                         &port->tx_stream->tx_burst_stats);
893 #endif
894
895         if (port->rx_queue_stats_mapping_enabled) {
896                 printf("\n");
897                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
898                         printf("  Stats reg %2d RX-packets:%14"PRIu64
899                                "     RX-errors:%14"PRIu64
900                                "    RX-bytes:%14"PRIu64"\n",
901                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
902                 }
903                 printf("\n");
904         }
905         if (port->tx_queue_stats_mapping_enabled) {
906                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
907                         printf("  Stats reg %2d TX-packets:%14"PRIu64
908                                "                                 TX-bytes:%14"PRIu64"\n",
909                                i, stats->q_opackets[i], stats->q_obytes[i]);
910                 }
911         }
912
913         printf("  %s--------------------------------%s\n",
914                fwd_stats_border, fwd_stats_border);
915 }
916
917 static void
918 fwd_stream_stats_display(streamid_t stream_id)
919 {
920         struct fwd_stream *fs;
921         static const char *fwd_top_stats_border = "-------";
922
923         fs = fwd_streams[stream_id];
924         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
925             (fs->fwd_dropped == 0))
926                 return;
927         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
928                "TX Port=%2d/Queue=%2d %s\n",
929                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
930                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
931         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
932                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
933
934         /* if checksum mode */
935         if (cur_fwd_eng == &csum_fwd_engine) {
936                printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
937                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
938         }
939
940 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
941         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
942         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
943 #endif
944 }
945
946 static void
947 flush_fwd_rx_queues(void)
948 {
949         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
950         portid_t  rxp;
951         portid_t port_id;
952         queueid_t rxq;
953         uint16_t  nb_rx;
954         uint16_t  i;
955         uint8_t   j;
956         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
957         uint64_t timer_period;
958
959         /* convert to number of cycles */
960         timer_period = rte_get_timer_hz(); /* 1 second timeout */
961
962         for (j = 0; j < 2; j++) {
963                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
964                         for (rxq = 0; rxq < nb_rxq; rxq++) {
965                                 port_id = fwd_ports_ids[rxp];
966                                 /*
967                                  * testpmd can get stuck in the do/while loop
968                                  * below if rte_eth_rx_burst() keeps returning
969                                  * nonzero packets, so a timer is used to exit
970                                  * the loop after a 1-second timeout.
971                                  */
972                                 prev_tsc = rte_rdtsc();
973                                 do {
974                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
975                                                 pkts_burst, MAX_PKT_BURST);
976                                         for (i = 0; i < nb_rx; i++)
977                                                 rte_pktmbuf_free(pkts_burst[i]);
978
979                                         cur_tsc = rte_rdtsc();
980                                         diff_tsc = cur_tsc - prev_tsc;
981                                         timer_tsc += diff_tsc;
982                                 } while ((nb_rx > 0) &&
983                                         (timer_tsc < timer_period));
984                                 timer_tsc = 0;
985                         }
986                 }
987                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
988         }
989 }
990
991 static void
992 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
993 {
994         struct fwd_stream **fsm;
995         streamid_t nb_fs;
996         streamid_t sm_id;
997 #ifdef RTE_LIBRTE_BITRATE
998         uint64_t tics_per_1sec;
999         uint64_t tics_datum;
1000         uint64_t tics_current;
1001         uint8_t idx_port, cnt_ports;
1002
1003         cnt_ports = rte_eth_dev_count();
1004         tics_datum = rte_rdtsc();
1005         tics_per_1sec = rte_get_timer_hz();
1006 #endif
1007         fsm = &fwd_streams[fc->stream_idx];
1008         nb_fs = fc->stream_nb;
1009         do {
1010                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1011                         (*pkt_fwd)(fsm[sm_id]);
1012 #ifdef RTE_LIBRTE_BITRATE
1013                 if (bitrate_enabled != 0 &&
1014                                 bitrate_lcore_id == rte_lcore_id()) {
1015                         tics_current = rte_rdtsc();
1016                         if (tics_current - tics_datum >= tics_per_1sec) {
1017                                 /* Periodic bitrate calculation */
1018                                 for (idx_port = 0;
1019                                                 idx_port < cnt_ports;
1020                                                 idx_port++)
1021                                         rte_stats_bitrate_calc(bitrate_data,
1022                                                 idx_port);
1023                                 tics_datum = tics_current;
1024                         }
1025                 }
1026 #endif
1027 #ifdef RTE_LIBRTE_LATENCY_STATS
1028                 if (latencystats_enabled != 0 &&
1029                                 latencystats_lcore_id == rte_lcore_id())
1030                         rte_latencystats_update();
1031 #endif
1032
1033         } while (! fc->stopped);
1034 }
1035
1036 static int
1037 start_pkt_forward_on_core(void *fwd_arg)
1038 {
1039         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1040                              cur_fwd_config.fwd_eng->packet_fwd);
1041         return 0;
1042 }
1043
1044 /*
1045  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1046  * Used to start communication flows in network loopback test configurations.
1047  */
1048 static int
1049 run_one_txonly_burst_on_core(void *fwd_arg)
1050 {
1051         struct fwd_lcore *fwd_lc;
1052         struct fwd_lcore tmp_lcore;
1053
1054         fwd_lc = (struct fwd_lcore *) fwd_arg;
1055         tmp_lcore = *fwd_lc;
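        /*
         * Mark the local copy as stopped so that run_pkt_fwd_on_lcore()
         * exits its do/while loop after a single pass, i.e. after sending
         * exactly one burst per stream.
         */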
1056         tmp_lcore.stopped = 1;
1057         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1058         return 0;
1059 }
1060
1061 /*
1062  * Launch packet forwarding:
1063  *     - Setup per-port forwarding context.
1064  *     - launch logical cores with their forwarding configuration.
1065  */
1066 static void
1067 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1068 {
1069         port_fwd_begin_t port_fwd_begin;
1070         unsigned int i;
1071         unsigned int lc_id;
1072         int diag;
1073
1074         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1075         if (port_fwd_begin != NULL) {
1076                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1077                         (*port_fwd_begin)(fwd_ports_ids[i]);
1078         }
1079         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1080                 lc_id = fwd_lcores_cpuids[i];
1081                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1082                         fwd_lcores[i]->stopped = 0;
1083                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1084                                                      fwd_lcores[i], lc_id);
1085                         if (diag != 0)
1086                                 printf("launch lcore %u failed - diag=%d\n",
1087                                        lc_id, diag);
1088                 }
1089         }
1090 }
1091
1092 /*
1093  * Launch packet forwarding configuration.
1094  */
1095 void
1096 start_packet_forwarding(int with_tx_first)
1097 {
1098         port_fwd_begin_t port_fwd_begin;
1099         port_fwd_end_t  port_fwd_end;
1100         struct rte_port *port;
1101         unsigned int i;
1102         portid_t   pt_id;
1103         streamid_t sm_id;
1104
1105         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1106                 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1107
1108         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1109                 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1110
1111         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1112                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1113                 (!nb_rxq || !nb_txq))
1114                 rte_exit(EXIT_FAILURE,
1115                         "Either rxq or txq are 0, cannot use %s fwd mode\n",
1116                         cur_fwd_eng->fwd_mode_name);
1117
1118         if (all_ports_started() == 0) {
1119                 printf("Not all ports were started\n");
1120                 return;
1121         }
1122         if (test_done == 0) {
1123                 printf("Packet forwarding already started\n");
1124                 return;
1125         }
1126
1127         if (init_fwd_streams() < 0) {
1128                 printf("Fail from init_fwd_streams()\n");
1129                 return;
1130         }
1131
1132         if(dcb_test) {
1133                 for (i = 0; i < nb_fwd_ports; i++) {
1134                         pt_id = fwd_ports_ids[i];
1135                         port = &ports[pt_id];
1136                         if (!port->dcb_flag) {
1137                                 printf("In DCB mode, all forwarding ports must "
1138                                        "be configured in this mode.\n");
1139                                 return;
1140                         }
1141                 }
1142                 if (nb_fwd_lcores == 1) {
1143                         printf("In DCB mode, the number of forwarding cores "
1144                                "should be larger than 1.\n");
1145                         return;
1146                 }
1147         }
1148         test_done = 0;
1149
1150         if(!no_flush_rx)
1151                 flush_fwd_rx_queues();
1152
1153         fwd_config_setup();
1154         pkt_fwd_config_display(&cur_fwd_config);
1155         rxtx_config_display();
1156
1157         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1158                 pt_id = fwd_ports_ids[i];
1159                 port = &ports[pt_id];
1160                 rte_eth_stats_get(pt_id, &port->stats);
1161                 port->tx_dropped = 0;
1162
1163                 map_port_queue_stats_mapping_registers(pt_id, port);
1164         }
1165         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1166                 fwd_streams[sm_id]->rx_packets = 0;
1167                 fwd_streams[sm_id]->tx_packets = 0;
1168                 fwd_streams[sm_id]->fwd_dropped = 0;
1169                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1170                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1171
1172 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1173                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1174                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1175                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1176                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1177 #endif
1178 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1179                 fwd_streams[sm_id]->core_cycles = 0;
1180 #endif
1181         }
1182         if (with_tx_first) {
1183                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1184                 if (port_fwd_begin != NULL) {
1185                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1186                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1187                 }
1188                 while (with_tx_first--) {
1189                         launch_packet_forwarding(
1190                                         run_one_txonly_burst_on_core);
1191                         rte_eal_mp_wait_lcore();
1192                 }
1193                 port_fwd_end = tx_only_engine.port_fwd_end;
1194                 if (port_fwd_end != NULL) {
1195                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1196                                 (*port_fwd_end)(fwd_ports_ids[i]);
1197                 }
1198         }
1199         launch_packet_forwarding(start_pkt_forward_on_core);
1200 }
1201
1202 void
1203 stop_packet_forwarding(void)
1204 {
1205         struct rte_eth_stats stats;
1206         struct rte_port *port;
1207         port_fwd_end_t  port_fwd_end;
1208         int i;
1209         portid_t   pt_id;
1210         streamid_t sm_id;
1211         lcoreid_t  lc_id;
1212         uint64_t total_recv;
1213         uint64_t total_xmit;
1214         uint64_t total_rx_dropped;
1215         uint64_t total_tx_dropped;
1216         uint64_t total_rx_nombuf;
1217         uint64_t tx_dropped;
1218         uint64_t rx_bad_ip_csum;
1219         uint64_t rx_bad_l4_csum;
1220 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1221         uint64_t fwd_cycles;
1222 #endif
1223
1224         static const char *acc_stats_border = "+++++++++++++++";
1225
1226         if (test_done) {
1227                 printf("Packet forwarding not started\n");
1228                 return;
1229         }
1230         printf("Telling cores to stop...");
1231         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1232                 fwd_lcores[lc_id]->stopped = 1;
1233         printf("\nWaiting for lcores to finish...\n");
1234         rte_eal_mp_wait_lcore();
1235         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1236         if (port_fwd_end != NULL) {
1237                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1238                         pt_id = fwd_ports_ids[i];
1239                         (*port_fwd_end)(pt_id);
1240                 }
1241         }
1242 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1243         fwd_cycles = 0;
1244 #endif
1245         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1246                 if (cur_fwd_config.nb_fwd_streams >
1247                     cur_fwd_config.nb_fwd_ports) {
1248                         fwd_stream_stats_display(sm_id);
1249                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1250                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1251                 } else {
1252                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1253                                 fwd_streams[sm_id];
1254                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1255                                 fwd_streams[sm_id];
1256                 }
1257                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1258                 tx_dropped = (uint64_t) (tx_dropped +
1259                                          fwd_streams[sm_id]->fwd_dropped);
1260                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1261
1262                 rx_bad_ip_csum =
1263                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1264                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1265                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1266                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1267                                                         rx_bad_ip_csum;
1268
1269                 rx_bad_l4_csum =
1270                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1271                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1272                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1273                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1274                                                         rx_bad_l4_csum;
1275
1276 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1277                 fwd_cycles = (uint64_t) (fwd_cycles +
1278                                          fwd_streams[sm_id]->core_cycles);
1279 #endif
1280         }
1281         total_recv = 0;
1282         total_xmit = 0;
1283         total_rx_dropped = 0;
1284         total_tx_dropped = 0;
1285         total_rx_nombuf  = 0;
1286         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1287                 pt_id = fwd_ports_ids[i];
1288
1289                 port = &ports[pt_id];
1290                 rte_eth_stats_get(pt_id, &stats);
1291                 stats.ipackets -= port->stats.ipackets;
1292                 port->stats.ipackets = 0;
1293                 stats.opackets -= port->stats.opackets;
1294                 port->stats.opackets = 0;
1295                 stats.ibytes   -= port->stats.ibytes;
1296                 port->stats.ibytes = 0;
1297                 stats.obytes   -= port->stats.obytes;
1298                 port->stats.obytes = 0;
1299                 stats.imissed  -= port->stats.imissed;
1300                 port->stats.imissed = 0;
1301                 stats.oerrors  -= port->stats.oerrors;
1302                 port->stats.oerrors = 0;
1303                 stats.rx_nombuf -= port->stats.rx_nombuf;
1304                 port->stats.rx_nombuf = 0;
1305
1306                 total_recv += stats.ipackets;
1307                 total_xmit += stats.opackets;
1308                 total_rx_dropped += stats.imissed;
1309                 total_tx_dropped += port->tx_dropped;
1310                 total_rx_nombuf  += stats.rx_nombuf;
1311
1312                 fwd_port_stats_display(pt_id, &stats);
1313         }
1314
1315         printf("\n  %s Accumulated forward statistics for all ports"
1316                "%s\n",
1317                acc_stats_border, acc_stats_border);
1318         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1319                "%-"PRIu64"\n"
1320                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1321                "%-"PRIu64"\n",
1322                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1323                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1324         if (total_rx_nombuf > 0)
1325                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1326         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1327                "%s\n",
1328                acc_stats_border, acc_stats_border);
1329 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1330         if (total_recv > 0)
1331                 printf("\n  CPU cycles/packet=%u (total cycles="
1332                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1333                        (unsigned int)(fwd_cycles / total_recv),
1334                        fwd_cycles, total_recv);
1335 #endif
1336         printf("\nDone.\n");
1337         test_done = 1;
1338 }
1339
1340 void
1341 dev_set_link_up(portid_t pid)
1342 {
1343         if (rte_eth_dev_set_link_up(pid) < 0)
1344                 printf("\nSet link up fail.\n");
1345 }
1346
1347 void
1348 dev_set_link_down(portid_t pid)
1349 {
1350         if (rte_eth_dev_set_link_down(pid) < 0)
1351                 printf("\nSet link down fail.\n");
1352 }
1353
1354 static int
1355 all_ports_started(void)
1356 {
1357         portid_t pi;
1358         struct rte_port *port;
1359
1360         RTE_ETH_FOREACH_DEV(pi) {
1361                 port = &ports[pi];
1362                 /* Check if there is a port which is not started */
1363                 if ((port->port_status != RTE_PORT_STARTED) &&
1364                         (port->slave_flag == 0))
1365                         return 0;
1366         }
1367
1368         /* All ports are started */
1369         return 1;
1370 }
1371
1372 int
1373 port_is_stopped(portid_t port_id)
1374 {
1375         struct rte_port *port = &ports[port_id];
1376
1377         if ((port->port_status != RTE_PORT_STOPPED) &&
1378             (port->slave_flag == 0))
1379                 return 0;
1380         return 1;
1381 }
1382
1383 int
1384 all_ports_stopped(void)
1385 {
1386         portid_t pi;
1387
1388         RTE_ETH_FOREACH_DEV(pi) {
1389                 if (!port_is_stopped(pi))
1390                         return 0;
1391         }
1392
1393         return 1;
1394 }
1395
1396 int
1397 port_is_started(portid_t port_id)
1398 {
1399         if (port_id_is_invalid(port_id, ENABLED_WARN))
1400                 return 0;
1401
1402         if (ports[port_id].port_status != RTE_PORT_STARTED)
1403                 return 0;
1404
1405         return 1;
1406 }
1407
1408 static int
1409 port_is_closed(portid_t port_id)
1410 {
1411         if (port_id_is_invalid(port_id, ENABLED_WARN))
1412                 return 0;
1413
1414         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1415                 return 0;
1416
1417         return 1;
1418 }
1419
1420 int
1421 start_port(portid_t pid)
1422 {
1423         int diag, need_check_link_status = -1;
1424         portid_t pi;
1425         queueid_t qi;
1426         struct rte_port *port;
1427         struct ether_addr mac_addr;
1428         enum rte_eth_event_type event_type;
1429
1430         if (port_id_is_invalid(pid, ENABLED_WARN))
1431                 return 0;
1432
1433         if(dcb_config)
1434                 dcb_test = 1;
1435         RTE_ETH_FOREACH_DEV(pi) {
1436                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1437                         continue;
1438
1439                 need_check_link_status = 0;
1440                 port = &ports[pi];
1441                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1442                                                  RTE_PORT_HANDLING) == 0) {
1443                         printf("Port %d is not stopped\n", pi);
1444                         continue;
1445                 }
1446
1447                 if (port->need_reconfig > 0) {
1448                         port->need_reconfig = 0;
1449
1450                         if (flow_isolate_all) {
1451                                 int ret = port_flow_isolate(pi, 1);
1452                                 if (ret) {
1453                                         printf("Failed to apply isolated"
1454                                                " mode on port %d\n", pi);
1455                                         return -1;
1456                                 }
1457                         }
1458
1459                         printf("Configuring Port %d (socket %u)\n", pi,
1460                                         port->socket_id);
1461                         /* configure port */
1462                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1463                                                 &(port->dev_conf));
1464                         if (diag != 0) {
1465                                 if (rte_atomic16_cmpset(&(port->port_status),
1466                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1467                                         printf("Port %d can not be set back "
1468                                                         "to stopped\n", pi);
1469                                 printf("Fail to configure port %d\n", pi);
1470                                 /* try to reconfigure port next time */
1471                                 port->need_reconfig = 1;
1472                                 return -1;
1473                         }
1474                 }
1475                 if (port->need_reconfig_queues > 0) {
1476                         port->need_reconfig_queues = 0;
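                        /*
                         * ETH_TXQ_FLAGS_IGNORE tells the PMD that the legacy
                         * txq_flags bits are not used and that the per-queue
                         * Tx offloads below are authoritative; those offloads
                         * are copied from the port-level txmode configuration,
                         * which is presumably filled in from the new Tx
                         * offloads command-line option.
                         */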
1477                         port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
1478                         /* Apply Tx offloads configuration */
1479                         port->tx_conf.offloads = port->dev_conf.txmode.offloads;
1480                         /* setup tx queues */
1481                         for (qi = 0; qi < nb_txq; qi++) {
1482                                 if ((numa_support) &&
1483                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1484                                         diag = rte_eth_tx_queue_setup(pi, qi,
1485                                                 nb_txd,txring_numa[pi],
1486                                                 &(port->tx_conf));
1487                                 else
1488                                         diag = rte_eth_tx_queue_setup(pi, qi,
1489                                                 nb_txd,port->socket_id,
1490                                                 &(port->tx_conf));
1491
1492                                 if (diag == 0)
1493                                         continue;
1494
1495                                 /* Failed to set up tx queue, return */
1496                                 if (rte_atomic16_cmpset(&(port->port_status),
1497                                                         RTE_PORT_HANDLING,
1498                                                         RTE_PORT_STOPPED) == 0)
1499                                         printf("Port %d can not be set back "
1500                                                         "to stopped\n", pi);
1501                                 printf("Fail to configure port %d tx queues\n", pi);
1502                                 /* try to reconfigure queues next time */
1503                                 port->need_reconfig_queues = 1;
1504                                 return -1;
1505                         }
1506                         /* Apply Rx offloads configuration */
1507                         port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
1508                         /* setup rx queues */
1509                         for (qi = 0; qi < nb_rxq; qi++) {
1510                                 if ((numa_support) &&
1511                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1512                                         struct rte_mempool * mp =
1513                                                 mbuf_pool_find(rxring_numa[pi]);
1514                                         if (mp == NULL) {
1515                                                 printf("Failed to setup RX queue:"
1516                                                         "No mempool allocation"
1517                                                         " on the socket %d\n",
1518                                                         rxring_numa[pi]);
1519                                                 return -1;
1520                                         }
1521
1522                                         diag = rte_eth_rx_queue_setup(pi, qi,
1523                                              nb_rxd,rxring_numa[pi],
1524                                              &(port->rx_conf),mp);
1525                                 } else {
1526                                         struct rte_mempool *mp =
1527                                                 mbuf_pool_find(port->socket_id);
1528                                         if (mp == NULL) {
1529                                                 printf("Failed to setup RX queue:"
1530                                                         "No mempool allocation"
1531                                                         " on the socket %d\n",
1532                                                         port->socket_id);
1533                                                 return -1;
1534                                         }
1535                                         diag = rte_eth_rx_queue_setup(pi, qi,
1536                                              nb_rxd,port->socket_id,
1537                                              &(port->rx_conf), mp);
1538                                 }
1539                                 if (diag == 0)
1540                                         continue;
1541
1542                                 /* Failed to set up rx queue, return */
1543                                 if (rte_atomic16_cmpset(&(port->port_status),
1544                                                         RTE_PORT_HANDLING,
1545                                                         RTE_PORT_STOPPED) == 0)
1546                                         printf("Port %d can not be set back "
1547                                                         "to stopped\n", pi);
1548                                 printf("Fail to configure port %d rx queues\n", pi);
1549                                 /* try to reconfigure queues next time */
1550                                 port->need_reconfig_queues = 1;
1551                                 return -1;
1552                         }
1553                 }
1554
1555                 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1556                      event_type < RTE_ETH_EVENT_MAX;
1557                      event_type++) {
1558                         diag = rte_eth_dev_callback_register(pi,
1559                                                         event_type,
1560                                                         eth_event_callback,
1561                                                         NULL);
1562                         if (diag) {
1563                                 printf("Failed to setup even callback for event %d\n",
1564                                         event_type);
1565                                 return -1;
1566                         }
1567                 }
1568
1569                 /* start port */
1570                 if (rte_eth_dev_start(pi) < 0) {
1571                         printf("Fail to start port %d\n", pi);
1572
1573                         /* Failed to start the port; roll it back to stopped */
1574                         if (rte_atomic16_cmpset(&(port->port_status),
1575                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1576                                 printf("Port %d can not be set back to "
1577                                                         "stopped\n", pi);
1578                         continue;
1579                 }
1580
1581                 if (rte_atomic16_cmpset(&(port->port_status),
1582                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1583                         printf("Port %d can not be set into started\n", pi);
1584
1585                 rte_eth_macaddr_get(pi, &mac_addr);
1586                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1587                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1588                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1589                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1590
1591                 /* at least one port started, need checking link status */
1592                 need_check_link_status = 1;
1593         }
1594
1595         if (need_check_link_status == 1 && !no_link_check)
1596                 check_all_ports_link_status(RTE_PORT_ALL);
1597         else if (need_check_link_status == 0)
1598                 printf("Please stop the ports first\n");
1599
1600         printf("Done\n");
1601         return 0;
1602 }
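/*
 * start_port() above follows the usual ethdev bring-up order:
 * rte_eth_dev_configure(), then one rte_eth_tx_queue_setup() /
 * rte_eth_rx_queue_setup() per queue, then rte_eth_dev_start().
 * A minimal stand-alone sketch of that order (hypothetical helper, not
 * built as part of testpmd; queue counts, ring sizes and error handling
 * are reduced to the bare minimum):
 */
#if 0
static int
example_bring_up_port(uint16_t port_id, struct rte_mempool *mb_pool,
                      const struct rte_eth_conf *conf)
{
        /* One Rx and one Tx queue with 512 descriptors each (example sizes). */
        if (rte_eth_dev_configure(port_id, 1, 1, conf) != 0)
                return -1;
        if (rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
                                   NULL) != 0)
                return -1;
        if (rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
                                   NULL, mb_pool) != 0)
                return -1;
        /* Start Rx/Tx processing on the device. */
        return rte_eth_dev_start(port_id);
}
#endif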
1603
1604 void
1605 stop_port(portid_t pid)
1606 {
1607         portid_t pi;
1608         struct rte_port *port;
1609         int need_check_link_status = 0;
1610
1611         if (dcb_test) {
1612                 dcb_test = 0;
1613                 dcb_config = 0;
1614         }
1615
1616         if (port_id_is_invalid(pid, ENABLED_WARN))
1617                 return;
1618
1619         printf("Stopping ports...\n");
1620
1621         RTE_ETH_FOREACH_DEV(pi) {
1622                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1623                         continue;
1624
1625                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1626                         printf("Please remove port %d from forwarding configuration.\n", pi);
1627                         continue;
1628                 }
1629
1630                 if (port_is_bonding_slave(pi)) {
1631                         printf("Please remove port %d from bonded device.\n", pi);
1632                         continue;
1633                 }
1634
1635                 port = &ports[pi];
1636                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1637                                                 RTE_PORT_HANDLING) == 0)
1638                         continue;
1639
1640                 rte_eth_dev_stop(pi);
1641
1642                 if (rte_atomic16_cmpset(&(port->port_status),
1643                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1644                         printf("Port %d can not be set into stopped\n", pi);
1645                 need_check_link_status = 1;
1646         }
1647         if (need_check_link_status && !no_link_check)
1648                 check_all_ports_link_status(RTE_PORT_ALL);
1649
1650         printf("Done\n");
1651 }
1652
1653 void
1654 close_port(portid_t pid)
1655 {
1656         portid_t pi;
1657         struct rte_port *port;
1658
1659         if (port_id_is_invalid(pid, ENABLED_WARN))
1660                 return;
1661
1662         printf("Closing ports...\n");
1663
1664         RTE_ETH_FOREACH_DEV(pi) {
1665                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1666                         continue;
1667
1668                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1669                         printf("Please remove port %d from forwarding configuration.\n", pi);
1670                         continue;
1671                 }
1672
1673                 if (port_is_bonding_slave(pi)) {
1674                         printf("Please remove port %d from bonded device.\n", pi);
1675                         continue;
1676                 }
1677
1678                 port = &ports[pi];
1679                 if (rte_atomic16_cmpset(&(port->port_status),
1680                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1681                         printf("Port %d is already closed\n", pi);
1682                         continue;
1683                 }
1684
1685                 if (rte_atomic16_cmpset(&(port->port_status),
1686                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1687                         printf("Port %d is now not stopped\n", pi);
1688                         continue;
1689                 }
1690
1691                 if (port->flow_list)
1692                         port_flow_flush(pi);
1693                 rte_eth_dev_close(pi);
1694
1695                 if (rte_atomic16_cmpset(&(port->port_status),
1696                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1697                         printf("Port %d cannot be set to closed\n", pi);
1698         }
1699
1700         printf("Done\n");
1701 }
1702
1703 void
1704 reset_port(portid_t pid)
1705 {
1706         int diag;
1707         portid_t pi;
1708         struct rte_port *port;
1709
1710         if (port_id_is_invalid(pid, ENABLED_WARN))
1711                 return;
1712
1713         printf("Resetting ports...\n");
1714
1715         RTE_ETH_FOREACH_DEV(pi) {
1716                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1717                         continue;
1718
1719                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1720                         printf("Please remove port %d from forwarding "
1721                                "configuration.\n", pi);
1722                         continue;
1723                 }
1724
1725                 if (port_is_bonding_slave(pi)) {
1726                         printf("Please remove port %d from bonded device.\n",
1727                                pi);
1728                         continue;
1729                 }
1730
1731                 diag = rte_eth_dev_reset(pi);
1732                 if (diag == 0) {
1733                         port = &ports[pi];
1734                         port->need_reconfig = 1;
1735                         port->need_reconfig_queues = 1;
1736                 } else {
1737                         printf("Failed to reset port %d. diag=%d\n", pi, diag);
1738                 }
1739         }
1740
1741         printf("Done\n");
1742 }
1743
1744 void
1745 attach_port(char *identifier)
1746 {
1747         portid_t pi = 0;
1748         unsigned int socket_id;
1749
1750         printf("Attaching a new port...\n");
1751
1752         if (identifier == NULL) {
1753                 printf("Invalid parameters are specified\n");
1754                 return;
1755         }
1756
1757         if (rte_eth_dev_attach(identifier, &pi))
1758                 return;
1759
1760         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1761         /* if socket_id is invalid, set to 0 */
1762         if (check_socket_id(socket_id) < 0)
1763                 socket_id = 0;
1764         reconfig(pi, socket_id);
1765         rte_eth_promiscuous_enable(pi);
1766
1767         nb_ports = rte_eth_dev_count();
1768
1769         ports[pi].port_status = RTE_PORT_STOPPED;
1770
1771         printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1772         printf("Done\n");
1773 }
1774
1775 void
1776 detach_port(portid_t port_id)
1777 {
1778         char name[RTE_ETH_NAME_MAX_LEN];
1779
1780         printf("Detaching a port...\n");
1781
1782         if (!port_is_closed(port_id)) {
1783                 printf("Please close port first\n");
1784                 return;
1785         }
1786
1787         if (ports[port_id].flow_list)
1788                 port_flow_flush(port_id);
1789
1790         if (rte_eth_dev_detach(port_id, name)) {
1791                 TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1792                 return;
1793         }
1794
1795         nb_ports = rte_eth_dev_count();
1796
1797         printf("Port '%s' is detached. Now total ports is %d\n",
1798                         name, nb_ports);
1799         printf("Done\n");
1800         return;
1801 }
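/*
 * attach_port()/detach_port() above wrap the ethdev hotplug API.  A
 * hedged sketch of the same call sequence in isolation (the device
 * string "net_tap0" is only an example, and the port is assumed to be
 * closed before detaching, as detach_port() requires):
 */
#if 0
static void
example_hotplug(void)
{
        uint16_t port_id;
        char name[RTE_ETH_NAME_MAX_LEN];

        /* Probe a new device and get its port id back. */
        if (rte_eth_dev_attach("net_tap0", &port_id) != 0)
                return;
        /* ... configure, start and use the port, then stop it ... */
        rte_eth_dev_close(port_id);
        /* Remove the underlying device; its name is returned for logging. */
        if (rte_eth_dev_detach(port_id, name) == 0)
                printf("detached %s\n", name);
}
#endif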
1802
1803 void
1804 pmd_test_exit(void)
1805 {
1806         portid_t pt_id;
1807
1808         if (test_done == 0)
1809                 stop_packet_forwarding();
1810
1811         if (ports != NULL) {
1812                 no_link_check = 1;
1813                 RTE_ETH_FOREACH_DEV(pt_id) {
1814                         printf("\nShutting down port %d...\n", pt_id);
1815                         fflush(stdout);
1816                         stop_port(pt_id);
1817                         close_port(pt_id);
1818                 }
1819         }
1820         printf("\nBye...\n");
1821 }
1822
1823 typedef void (*cmd_func_t)(void);
1824 struct pmd_test_command {
1825         const char *cmd_name;
1826         cmd_func_t cmd_func;
1827 };
1828
1829 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1830
1831 /* Check the link status of all ports for up to 9 s, then print the final status */
1832 static void
1833 check_all_ports_link_status(uint32_t port_mask)
1834 {
1835 #define CHECK_INTERVAL 100 /* 100ms */
1836 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1837         portid_t portid;
1838         uint8_t count, all_ports_up, print_flag = 0;
1839         struct rte_eth_link link;
1840
1841         printf("Checking link statuses...\n");
1842         fflush(stdout);
1843         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1844                 all_ports_up = 1;
1845                 RTE_ETH_FOREACH_DEV(portid) {
1846                         if ((port_mask & (1 << portid)) == 0)
1847                                 continue;
1848                         memset(&link, 0, sizeof(link));
1849                         rte_eth_link_get_nowait(portid, &link);
1850                         /* print link status if flag set */
1851                         if (print_flag == 1) {
1852                                 if (link.link_status)
1853                                         printf(
1854                                         "Port%d Link Up. speed %u Mbps- %s\n",
1855                                         portid, link.link_speed,
1856                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1857                                         ("full-duplex") : ("half-duplex\n"));
1858                                 else
1859                                         printf("Port %d Link Down\n", portid);
1860                                 continue;
1861                         }
1862                         /* clear all_ports_up flag if any link down */
1863                         if (link.link_status == ETH_LINK_DOWN) {
1864                                 all_ports_up = 0;
1865                                 break;
1866                         }
1867                 }
1868                 /* after finally printing all link status, get out */
1869                 if (print_flag == 1)
1870                         break;
1871
1872                 if (all_ports_up == 0) {
1873                         fflush(stdout);
1874                         rte_delay_ms(CHECK_INTERVAL);
1875                 }
1876
1877                 /* set the print_flag if all ports up or timeout */
1878                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1879                         print_flag = 1;
1880                 }
1881
1882                 if (lsc_interrupt)
1883                         break;
1884         }
1885 }
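/*
 * check_all_ports_link_status() polls every selected port with
 * rte_eth_link_get_nowait() until all links are up or roughly 9 seconds
 * have elapsed.  The per-port primitive it is built on, shown as a
 * hedged stand-alone sketch (retry count and delay are example values):
 */
#if 0
static int
example_wait_link_up(uint16_t port_id, int max_tries)
{
        struct rte_eth_link link;

        while (max_tries-- > 0) {
                memset(&link, 0, sizeof(link));
                rte_eth_link_get_nowait(port_id, &link); /* non-blocking query */
                if (link.link_status == ETH_LINK_UP)
                        return 0;
                rte_delay_ms(100);
        }
        return -1; /* still down after the allotted retries */
}
#endif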
1886
1887 static void
1888 rmv_event_callback(void *arg)
1889 {
1890         struct rte_eth_dev *dev;
1891         portid_t port_id = (intptr_t)arg;
1892
1893         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1894         dev = &rte_eth_devices[port_id];
1895
1896         stop_port(port_id);
1897         close_port(port_id);
1898         printf("removing device %s\n", dev->device->name);
1899         if (rte_eal_dev_detach(dev->device))
1900                 TESTPMD_LOG(ERR, "Failed to detach device %s\n",
1901                         dev->device->name);
1902 }
1903
1904 /* This function is used by the interrupt thread */
1905 static int
1906 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1907                   void *ret_param)
1908 {
1909         static const char * const event_desc[] = {
1910                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1911                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1912                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1913                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1914                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1915                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1916                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1917                 [RTE_ETH_EVENT_MAX] = NULL,
1918         };
1919
1920         RTE_SET_USED(param);
1921         RTE_SET_USED(ret_param);
1922
1923         if (type >= RTE_ETH_EVENT_MAX) {
1924                 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1925                         port_id, __func__, type);
1926                 fflush(stderr);
1927         } else if (event_print_mask & (UINT32_C(1) << type)) {
1928                 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1929                         event_desc[type]);
1930                 fflush(stdout);
1931         }
1932
1933         switch (type) {
1934         case RTE_ETH_EVENT_INTR_RMV:
1935                 if (rte_eal_alarm_set(100000,
1936                                 rmv_event_callback, (void *)(intptr_t)port_id))
1937                         fprintf(stderr, "Could not set up deferred device removal\n");
1938                 break;
1939         default:
1940                 break;
1941         }
1942         return 0;
1943 }
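/*
 * Callbacks of this shape are registered per event type with
 * rte_eth_dev_callback_register(); start_port() above registers
 * eth_event_callback() for every event type.  Registering it for link
 * state changes only would look like this (hedged sketch):
 */
#if 0
static void
example_register_lsc(uint16_t port_id)
{
        if (rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
                                          eth_event_callback, NULL) != 0)
                printf("cannot register LSC callback for port %u\n",
                       (unsigned int)port_id);
}
#endif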
1944
1945 static int
1946 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1947 {
1948         uint16_t i;
1949         int diag;
1950         uint8_t mapping_found = 0;
1951
1952         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1953                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1954                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1955                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1956                                         tx_queue_stats_mappings[i].queue_id,
1957                                         tx_queue_stats_mappings[i].stats_counter_id);
1958                         if (diag != 0)
1959                                 return diag;
1960                         mapping_found = 1;
1961                 }
1962         }
1963         if (mapping_found)
1964                 port->tx_queue_stats_mapping_enabled = 1;
1965         return 0;
1966 }
1967
1968 static int
1969 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1970 {
1971         uint16_t i;
1972         int diag;
1973         uint8_t mapping_found = 0;
1974
1975         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1976                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1977                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1978                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1979                                         rx_queue_stats_mappings[i].queue_id,
1980                                         rx_queue_stats_mappings[i].stats_counter_id);
1981                         if (diag != 0)
1982                                 return diag;
1983                         mapping_found = 1;
1984                 }
1985         }
1986         if (mapping_found)
1987                 port->rx_queue_stats_mapping_enabled = 1;
1988         return 0;
1989 }
1990
1991 static void
1992 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
1993 {
1994         int diag = 0;
1995
1996         diag = set_tx_queue_stats_mapping_registers(pi, port);
1997         if (diag != 0) {
1998                 if (diag == -ENOTSUP) {
1999                         port->tx_queue_stats_mapping_enabled = 0;
2000                         printf("TX queue stats mapping not supported port id=%d\n", pi);
2001                 }
2002                 else
2003                         rte_exit(EXIT_FAILURE,
2004                                         "set_tx_queue_stats_mapping_registers "
2005                                         "failed for port id=%d diag=%d\n",
2006                                         pi, diag);
2007         }
2008
2009         diag = set_rx_queue_stats_mapping_registers(pi, port);
2010         if (diag != 0) {
2011                 if (diag == -ENOTSUP) {
2012                         port->rx_queue_stats_mapping_enabled = 0;
2013                         printf("RX queue stats mapping not supported port id=%d\n", pi);
2014                 }
2015                 else
2016                         rte_exit(EXIT_FAILURE,
2017                                         "set_rx_queue_stats_mapping_registers "
2018                                         "failed for port id=%d diag=%d\n",
2019                                         pi, diag);
2020         }
2021 }
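/*
 * Once a queue is mapped to a stats counter index, its packet and byte
 * counts are reported in the q_* arrays of struct rte_eth_stats at that
 * index.  A hedged sketch of using the mapping (queue 0 and counter
 * index 3 are example values; not every PMD supports this, hence the
 * -ENOTSUP handling above):
 */
#if 0
static void
example_queue_stats(uint16_t port_id)
{
        struct rte_eth_stats stats;

        /* Map Tx queue 0 of the port to stats counter index 3. */
        rte_eth_dev_set_tx_queue_stats_mapping(port_id, 0, 3);
        /* ... run some traffic ... */
        rte_eth_stats_get(port_id, &stats);
        printf("txq0 packets: %" PRIu64 "\n", stats.q_opackets[3]);
}
#endif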
2022
2023 static void
2024 rxtx_port_config(struct rte_port *port)
2025 {
2026         port->rx_conf = port->dev_info.default_rxconf;
2027         port->tx_conf = port->dev_info.default_txconf;
2028
2029         /* Check if any RX/TX parameters have been passed */
2030         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2031                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2032
2033         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2034                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2035
2036         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2037                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2038
2039         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2040                 port->rx_conf.rx_free_thresh = rx_free_thresh;
2041
2042         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2043                 port->rx_conf.rx_drop_en = rx_drop_en;
2044
2045         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2046                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2047
2048         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2049                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2050
2051         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2052                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2053
2054         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2055                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2056
2057         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2058                 port->tx_conf.tx_free_thresh = tx_free_thresh;
2059
2060         if (txq_flags != RTE_PMD_PARAM_UNSET)
2061                 port->tx_conf.txq_flags = txq_flags;
2062 }
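/*
 * rxtx_port_config() starts from the defaults advertised by the PMD in
 * dev_info and only overrides the fields the user set on the command
 * line.  The same pattern in isolation (hedged sketch; the threshold
 * value 8 and ring size 512 are example values):
 */
#if 0
static void
example_txconf_override(uint16_t port_id)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_txconf txconf;

        rte_eth_dev_info_get(port_id, &dev_info);
        txconf = dev_info.default_txconf; /* start from the PMD defaults */
        txconf.tx_thresh.pthresh = 8;     /* override a single field */
        rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
}
#endif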
2063
2064 void
2065 init_port_config(void)
2066 {
2067         portid_t pid;
2068         struct rte_port *port;
2069
2070         RTE_ETH_FOREACH_DEV(pid) {
2071                 port = &ports[pid];
2072                 port->dev_conf.rxmode = rx_mode;
2073                 port->dev_conf.fdir_conf = fdir_conf;
2074                 if (nb_rxq > 1) {
2075                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2076                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2077                 } else {
2078                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2079                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2080                 }
2081
2082                 if (port->dcb_flag == 0) {
2083                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2084                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2085                         else
2086                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2087                 }
2088
2089                 rxtx_port_config(port);
2090
2091                 rte_eth_macaddr_get(pid, &port->eth_addr);
2092
2093                 map_port_queue_stats_mapping_registers(pid, port);
2094 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2095                 rte_pmd_ixgbe_bypass_init(pid);
2096 #endif
2097
2098                 if (lsc_interrupt &&
2099                     (rte_eth_devices[pid].data->dev_flags &
2100                      RTE_ETH_DEV_INTR_LSC))
2101                         port->dev_conf.intr_conf.lsc = 1;
2102                 if (rmv_interrupt &&
2103                     (rte_eth_devices[pid].data->dev_flags &
2104                      RTE_ETH_DEV_INTR_RMV))
2105                         port->dev_conf.intr_conf.rmv = 1;
2106
2107 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2108                 /* Detect softnic port */
2109                 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2110                         port->softnic_enable = 1;
2111                         memset(&port->softport, 0, sizeof(struct softnic_port));
2112
2113                         if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2114                                 port->softport.tm_flag = 1;
2115                 }
2116 #endif
2117         }
2118 }
2119
2120 void set_port_slave_flag(portid_t slave_pid)
2121 {
2122         struct rte_port *port;
2123
2124         port = &ports[slave_pid];
2125         port->slave_flag = 1;
2126 }
2127
2128 void clear_port_slave_flag(portid_t slave_pid)
2129 {
2130         struct rte_port *port;
2131
2132         port = &ports[slave_pid];
2133         port->slave_flag = 0;
2134 }
2135
2136 uint8_t port_is_bonding_slave(portid_t slave_pid)
2137 {
2138         struct rte_port *port;
2139
2140         port = &ports[slave_pid];
2141         return port->slave_flag;
2142 }
2143
2144 const uint16_t vlan_tags[] = {
2145                 0,  1,  2,  3,  4,  5,  6,  7,
2146                 8,  9, 10, 11,  12, 13, 14, 15,
2147                 16, 17, 18, 19, 20, 21, 22, 23,
2148                 24, 25, 26, 27, 28, 29, 30, 31
2149 };
2150
2151 static  int
2152 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2153                  enum dcb_mode_enable dcb_mode,
2154                  enum rte_eth_nb_tcs num_tcs,
2155                  uint8_t pfc_en)
2156 {
2157         uint8_t i;
2158
2159         /*
2160          * Builds up the correct configuration for dcb+vt based on the vlan tags array
2161          * given above, and the number of traffic classes available for use.
2162          */
2163         if (dcb_mode == DCB_VT_ENABLED) {
2164                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2165                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2166                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2167                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2168
2169                 /* VMDQ+DCB RX and TX configurations */
2170                 vmdq_rx_conf->enable_default_pool = 0;
2171                 vmdq_rx_conf->default_pool = 0;
2172                 vmdq_rx_conf->nb_queue_pools =
2173                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2174                 vmdq_tx_conf->nb_queue_pools =
2175                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2176
2177                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2178                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2179                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2180                         vmdq_rx_conf->pool_map[i].pools =
2181                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
2182                 }
2183                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2184                         vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2185                         vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2186                 }
2187
2188                 /* set DCB mode of RX and TX of multiple queues */
2189                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2190                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2191         } else {
2192                 struct rte_eth_dcb_rx_conf *rx_conf =
2193                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
2194                 struct rte_eth_dcb_tx_conf *tx_conf =
2195                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
2196
2197                 rx_conf->nb_tcs = num_tcs;
2198                 tx_conf->nb_tcs = num_tcs;
2199
2200                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2201                         rx_conf->dcb_tc[i] = i % num_tcs;
2202                         tx_conf->dcb_tc[i] = i % num_tcs;
2203                 }
2204                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2205                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2206                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2207         }
2208
2209         if (pfc_en)
2210                 eth_conf->dcb_capability_en =
2211                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2212         else
2213                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2214
2215         return 0;
2216 }
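/*
 * Worked example of the mappings built above: with num_tcs == ETH_4_TCS
 * the eight user priorities 0..7 map to traffic classes 0,1,2,3,0,1,2,3
 * (i % num_tcs), and in the VMDq+DCB case each configured vlan tag i is
 * assigned to pool (i % nb_queue_pools), i.e. one pool per tag when 32
 * pools are available.
 */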
2217
2218 int
2219 init_port_dcb_config(portid_t pid,
2220                      enum dcb_mode_enable dcb_mode,
2221                      enum rte_eth_nb_tcs num_tcs,
2222                      uint8_t pfc_en)
2223 {
2224         struct rte_eth_conf port_conf;
2225         struct rte_port *rte_port;
2226         int retval;
2227         uint16_t i;
2228
2229         rte_port = &ports[pid];
2230
2231         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2232         /* Enter DCB configuration status */
2233         dcb_config = 1;
2234
2235         /* Set configuration of DCB in VT mode and DCB in non-VT mode */
2236         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2237         if (retval < 0)
2238                 return retval;
2239         port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2240
2241         /**
2242          * Write the configuration into the device.
2243          * Set the numbers of RX & TX queues to 0, so
2244          * the RX & TX queues will not be setup.
2245          */
2246         rte_eth_dev_configure(pid, 0, 0, &port_conf);
2247
2248         rte_eth_dev_info_get(pid, &rte_port->dev_info);
2249
2250         /* If dev_info.vmdq_pool_base is greater than 0,
2251          * the queue id of vmdq pools is started after pf queues.
2252          */
2253         if (dcb_mode == DCB_VT_ENABLED &&
2254             rte_port->dev_info.vmdq_pool_base > 0) {
2255                 printf("VMDQ_DCB multi-queue mode is nonsensical"
2256                         " for port %d.", pid);
2257                 return -1;
2258         }
2259
2260         /* Assume the ports in testpmd have the same dcb capability
2261          * and have the same number of rxq and txq in DCB mode
2262          */
2263         if (dcb_mode == DCB_VT_ENABLED) {
2264                 if (rte_port->dev_info.max_vfs > 0) {
2265                         nb_rxq = rte_port->dev_info.nb_rx_queues;
2266                         nb_txq = rte_port->dev_info.nb_tx_queues;
2267                 } else {
2268                         nb_rxq = rte_port->dev_info.max_rx_queues;
2269                         nb_txq = rte_port->dev_info.max_tx_queues;
2270                 }
2271         } else {
2272                 /*if vt is disabled, use all pf queues */
2273                 if (rte_port->dev_info.vmdq_pool_base == 0) {
2274                         nb_rxq = rte_port->dev_info.max_rx_queues;
2275                         nb_txq = rte_port->dev_info.max_tx_queues;
2276                 } else {
2277                         nb_rxq = (queueid_t)num_tcs;
2278                         nb_txq = (queueid_t)num_tcs;
2279
2280                 }
2281         }
2282         rx_free_thresh = 64;
2283
2284         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2285
2286         rxtx_port_config(rte_port);
2287         /* VLAN filter */
2288         rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2289         for (i = 0; i < RTE_DIM(vlan_tags); i++)
2290                 rx_vft_set(pid, vlan_tags[i], 1);
2291
2292         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2293         map_port_queue_stats_mapping_registers(pid, rte_port);
2294
2295         rte_port->dcb_flag = 1;
2296
2297         return 0;
2298 }
2299
2300 static void
2301 init_port(void)
2302 {
2303         /* Configuration of Ethernet ports. */
2304         ports = rte_zmalloc("testpmd: ports",
2305                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2306                             RTE_CACHE_LINE_SIZE);
2307         if (ports == NULL) {
2308                 rte_exit(EXIT_FAILURE,
2309                                 "rte_zmalloc(%d struct rte_port) failed\n",
2310                                 RTE_MAX_ETHPORTS);
2311         }
2312 }
2313
2314 static void
2315 force_quit(void)
2316 {
2317         pmd_test_exit();
2318         prompt_exit();
2319 }
2320
2321 static void
2322 print_stats(void)
2323 {
2324         uint8_t i;
2325         const char clr[] = { 27, '[', '2', 'J', '\0' };
2326         const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2327
2328         /* Clear screen and move to top left */
2329         printf("%s%s", clr, top_left);
2330
2331         printf("\nPort statistics ====================================");
2332         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2333                 nic_stats_display(fwd_ports_ids[i]);
2334 }
2335
2336 static void
2337 signal_handler(int signum)
2338 {
2339         if (signum == SIGINT || signum == SIGTERM) {
2340                 printf("\nSignal %d received, preparing to exit...\n",
2341                                 signum);
2342 #ifdef RTE_LIBRTE_PDUMP
2343                 /* uninitialize packet capture framework */
2344                 rte_pdump_uninit();
2345 #endif
2346 #ifdef RTE_LIBRTE_LATENCY_STATS
2347                 rte_latencystats_uninit();
2348 #endif
2349                 force_quit();
2350                 /* Set flag to indicate forced termination. */
2351                 f_quit = 1;
2352                 /* exit with the expected status */
2353                 signal(signum, SIG_DFL);
2354                 kill(getpid(), signum);
2355         }
2356 }
2357
2358 int
2359 main(int argc, char** argv)
2360 {
2361         int  diag;
2362         portid_t port_id;
2363
2364         signal(SIGINT, signal_handler);
2365         signal(SIGTERM, signal_handler);
2366
2367         diag = rte_eal_init(argc, argv);
2368         if (diag < 0)
2369                 rte_panic("Cannot init EAL\n");
2370
2371         testpmd_logtype = rte_log_register("testpmd");
2372         if (testpmd_logtype < 0)
2373                 rte_panic("Cannot register log type");
2374         rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2375
2376         if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2377                 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2378                         strerror(errno));
2379         }
2380
2381 #ifdef RTE_LIBRTE_PDUMP
2382         /* initialize packet capture framework */
2383         rte_pdump_init(NULL);
2384 #endif
2385
2386         nb_ports = (portid_t) rte_eth_dev_count();
2387         if (nb_ports == 0)
2388                 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2389
2390         /* allocate port structures, and init them */
2391         init_port();
2392
2393         set_def_fwd_config();
2394         if (nb_lcores == 0)
2395                 rte_panic("Empty set of forwarding logical cores - check the "
2396                           "core mask supplied in the command parameters\n");
2397
2398         /* Bitrate/latency stats disabled by default */
2399 #ifdef RTE_LIBRTE_BITRATE
2400         bitrate_enabled = 0;
2401 #endif
2402 #ifdef RTE_LIBRTE_LATENCY_STATS
2403         latencystats_enabled = 0;
2404 #endif
2405
2406         argc -= diag;
2407         argv += diag;
2408         if (argc > 1)
2409                 launch_args_parse(argc, argv);
2410
2411         if (tx_first && interactive)
2412                 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
2413                                 "interactive mode.\n");
2414
2415         if (tx_first && lsc_interrupt) {
2416                 printf("Warning: lsc_interrupt needs to be off when "
2417                                 " using tx_first. Disabling.\n");
2418                 lsc_interrupt = 0;
2419         }
2420
2421         if (!nb_rxq && !nb_txq)
2422                 printf("Warning: Either rx or tx queues should be non-zero\n");
2423
2424         if (nb_rxq > 1 && nb_rxq > nb_txq)
2425                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2426                        "but nb_txq=%d will prevent to fully test it.\n",
2427                        nb_rxq, nb_txq);
2428
2429         init_config();
2430         if (start_port(RTE_PORT_ALL) != 0)
2431                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2432
2433         /* set all ports to promiscuous mode by default */
2434         RTE_ETH_FOREACH_DEV(port_id)
2435                 rte_eth_promiscuous_enable(port_id);
2436
2437         /* Init metrics library */
2438         rte_metrics_init(rte_socket_id());
2439
2440 #ifdef RTE_LIBRTE_LATENCY_STATS
2441         if (latencystats_enabled != 0) {
2442                 int ret = rte_latencystats_init(1, NULL);
2443                 if (ret)
2444                         printf("Warning: latencystats init()"
2445                                 " returned error %d\n", ret);
2446                 printf("Latencystats running on lcore %d\n",
2447                         latencystats_lcore_id);
2448         }
2449 #endif
2450
2451         /* Setup bitrate stats */
2452 #ifdef RTE_LIBRTE_BITRATE
2453         if (bitrate_enabled != 0) {
2454                 bitrate_data = rte_stats_bitrate_create();
2455                 if (bitrate_data == NULL)
2456                         rte_exit(EXIT_FAILURE,
2457                                 "Could not allocate bitrate data.\n");
2458                 rte_stats_bitrate_reg(bitrate_data);
2459         }
2460 #endif
2461
2462 #ifdef RTE_LIBRTE_CMDLINE
2463         if (strlen(cmdline_filename) != 0)
2464                 cmdline_read_from_file(cmdline_filename);
2465
2466         if (interactive == 1) {
2467                 if (auto_start) {
2468                         printf("Start automatic packet forwarding\n");
2469                         start_packet_forwarding(0);
2470                 }
2471                 prompt();
2472                 pmd_test_exit();
2473         } else
2474 #endif
2475         {
2476                 char c;
2477                 int rc;
2478
2479                 f_quit = 0;
2480
2481                 printf("No commandline core given, start packet forwarding\n");
2482                 start_packet_forwarding(tx_first);
2483                 if (stats_period != 0) {
2484                         uint64_t prev_time = 0, cur_time, diff_time = 0;
2485                         uint64_t timer_period;
2486
2487                         /* Convert to number of cycles */
2488                         timer_period = stats_period * rte_get_timer_hz();
2489
2490                         while (f_quit == 0) {
2491                                 cur_time = rte_get_timer_cycles();
2492                                 diff_time += cur_time - prev_time;
2493
2494                                 if (diff_time >= timer_period) {
2495                                         print_stats();
2496                                         /* Reset the timer */
2497                                         diff_time = 0;
2498                                 }
2499                                 /* Sleep to avoid unnecessary checks */
2500                                 prev_time = cur_time;
2501                                 sleep(1);
2502                         }
2503                 }
2504
2505                 printf("Press enter to exit\n");
2506                 rc = read(0, &c, 1);
2507                 pmd_test_exit();
2508                 if (rc < 0)
2509                         return 1;
2510         }
2511
2512         return 0;
2513 }
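/*
 * A typical non-interactive invocation exercising the flow in main()
 * (EAL arguments and the Tx offload mask are examples only; the option
 * name for the port-level Tx offload mask is assumed to be
 * --tx-offloads):
 *
 *   testpmd -l 0-3 -n 4 -- --rxq=2 --txq=2 --tx-offloads=0x1 \
 *           --stats-period 1
 */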