app/testpmd: fix invalid Rx queue number setting
[dpdk.git] / app / test-pmd / testpmd.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <errno.h>
15
16 #include <sys/queue.h>
17 #include <sys/stat.h>
18
19 #include <stdint.h>
20 #include <unistd.h>
21 #include <inttypes.h>
22
23 #include <rte_common.h>
24 #include <rte_errno.h>
25 #include <rte_byteorder.h>
26 #include <rte_log.h>
27 #include <rte_debug.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
30 #include <rte_memcpy.h>
31 #include <rte_launch.h>
32 #include <rte_eal.h>
33 #include <rte_alarm.h>
34 #include <rte_per_lcore.h>
35 #include <rte_lcore.h>
36 #include <rte_atomic.h>
37 #include <rte_branch_prediction.h>
38 #include <rte_mempool.h>
39 #include <rte_malloc.h>
40 #include <rte_mbuf.h>
41 #include <rte_interrupts.h>
42 #include <rte_pci.h>
43 #include <rte_ether.h>
44 #include <rte_ethdev.h>
45 #include <rte_dev.h>
46 #include <rte_string_fns.h>
47 #ifdef RTE_LIBRTE_IXGBE_PMD
48 #include <rte_pmd_ixgbe.h>
49 #endif
50 #ifdef RTE_LIBRTE_PDUMP
51 #include <rte_pdump.h>
52 #endif
53 #include <rte_flow.h>
54 #include <rte_metrics.h>
55 #ifdef RTE_LIBRTE_BITRATE
56 #include <rte_bitrate.h>
57 #endif
58 #ifdef RTE_LIBRTE_LATENCY_STATS
59 #include <rte_latencystats.h>
60 #endif
61
62 #include "testpmd.h"
63
64 uint16_t verbose_level = 0; /**< Silent by default. */
65 int testpmd_logtype; /**< Log type for testpmd logs */
66
67 /* use master core for command line ? */
68 uint8_t interactive = 0;
69 uint8_t auto_start = 0;
70 uint8_t tx_first;
71 char cmdline_filename[PATH_MAX] = {0};
72
73 /*
74  * NUMA support configuration.
75  * When set, the NUMA support attempts to dispatch the allocation of the
76  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
77  * probed ports among the CPU sockets 0 and 1.
78  * Otherwise, all memory is allocated from CPU socket 0.
79  */
80 uint8_t numa_support = 1; /**< numa enabled by default */
81
82 /*
83  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
84  * not configured.
85  */
86 uint8_t socket_num = UMA_NO_CONFIG;
87
88 /*
89  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
90  */
91 uint8_t mp_anon = 0;
92
93 /*
94  * Record the Ethernet address of peer target ports to which packets are
95  * forwarded.
96  * Must be instantiated with the ethernet addresses of peer traffic generator
97  * ports.
98  */
99 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
100 portid_t nb_peer_eth_addrs = 0;
101
102 /*
103  * Probed Target Environment.
104  */
105 struct rte_port *ports;        /**< For all probed ethernet ports. */
106 portid_t nb_ports;             /**< Number of probed ethernet ports. */
107 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
108 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
109
110 /*
111  * Test Forwarding Configuration.
112  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
113  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
114  */
115 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
116 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
117 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
118 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
119
120 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
121 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
122
123 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
124 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
125
126 /*
127  * Forwarding engines.
128  */
129 struct fwd_engine * fwd_engines[] = {
130         &io_fwd_engine,
131         &mac_fwd_engine,
132         &mac_swap_engine,
133         &flow_gen_engine,
134         &rx_only_engine,
135         &tx_only_engine,
136         &csum_fwd_engine,
137         &icmp_echo_engine,
138 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
139         &softnic_tm_engine,
140         &softnic_tm_bypass_engine,
141 #endif
142 #ifdef RTE_LIBRTE_IEEE1588
143         &ieee1588_fwd_engine,
144 #endif
145         NULL,
146 };
147
148 struct fwd_config cur_fwd_config;
149 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
150 uint32_t retry_enabled;
151 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
152 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
153
154 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
155 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
156                                       * specified on command-line. */
157 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
158
159 /*
160  * In a container, a process running with the 'stats-period' option
161  * cannot be terminated. Set flag to exit stats period loop after receiving SIGINT/SIGTERM.
162  */
163 uint8_t f_quit;
164
165 /*
166  * Configuration of packet segments used by the "txonly" processing engine.
167  */
168 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
169 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
170         TXONLY_DEF_PACKET_LEN,
171 };
172 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
173
174 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
175 /**< Split policy for packets to TX. */
176
177 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
178 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
179
180 /* current configuration is in DCB or not,0 means it is not in DCB mode */
181 uint8_t dcb_config = 0;
182
183 /* Whether the dcb is in testing status */
184 uint8_t dcb_test = 0;
185
186 /*
187  * Configurable number of RX/TX queues.
188  */
189 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
190 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
191
192 /*
193  * Configurable number of RX/TX ring descriptors.
194  */
195 #define RTE_TEST_RX_DESC_DEFAULT 128
196 #define RTE_TEST_TX_DESC_DEFAULT 512
197 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
198 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
199
200 #define RTE_PMD_PARAM_UNSET -1
201 /*
202  * Configurable values of RX and TX ring threshold registers.
203  */
204
205 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
206 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
207 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
208
209 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
210 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
211 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
212
213 /*
214  * Configurable value of RX free threshold.
215  */
216 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
217
218 /*
219  * Configurable value of RX drop enable.
220  */
221 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
222
223 /*
224  * Configurable value of TX free threshold.
225  */
226 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
227
228 /*
229  * Configurable value of TX RS bit threshold.
230  */
231 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
232
233 /*
234  * Receive Side Scaling (RSS) configuration.
235  */
236 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
237
238 /*
239  * Port topology configuration
240  */
241 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
242
243 /*
244  * Avoid flushing all the RX streams before starting forwarding.
245  */
246 uint8_t no_flush_rx = 0; /* flush by default */
247
248 /*
249  * Flow API isolated mode.
250  */
251 uint8_t flow_isolate_all;
252
253 /*
254  * Avoid checking link status when starting/stopping a port.
255  */
256 uint8_t no_link_check = 0; /* check by default */
257
258 /*
259  * Enable link status change notification
260  */
261 uint8_t lsc_interrupt = 1; /* enabled by default */
262
263 /*
264  * Enable device removal notification.
265  */
266 uint8_t rmv_interrupt = 1; /* enabled by default */
267
268 /*
269  * Display or mask ether events
270  * Default to all events except VF_MBOX
271  */
272 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
273                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
274                             (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
275                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
276                             (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
277                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
278
279 /*
280  * NIC bypass mode configuration options.
281  */
282
283 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
284 /* The NIC bypass watchdog timeout. */
285 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
286 #endif
287
288
289 #ifdef RTE_LIBRTE_LATENCY_STATS
290
291 /*
292  * Set when latency stats is enabled in the commandline
293  */
294 uint8_t latencystats_enabled;
295
296 /*
297  * Lcore ID to service latency statistics.
298  */
299 lcoreid_t latencystats_lcore_id = -1;
300
301 #endif
302
303 /*
304  * Ethernet device configuration.
305  */
306 struct rte_eth_rxmode rx_mode = {
307         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
308         .offloads = (DEV_RX_OFFLOAD_VLAN_FILTER |
309                      DEV_RX_OFFLOAD_VLAN_STRIP |
310                      DEV_RX_OFFLOAD_CRC_STRIP),
311         .ignore_offload_bitfield = 1,
312 };
313
314 struct rte_eth_txmode tx_mode = {
315         .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
316 };
317
318 struct rte_fdir_conf fdir_conf = {
319         .mode = RTE_FDIR_MODE_NONE,
320         .pballoc = RTE_FDIR_PBALLOC_64K,
321         .status = RTE_FDIR_REPORT_STATUS,
322         .mask = {
323                 .vlan_tci_mask = 0x0,
324                 .ipv4_mask     = {
325                         .src_ip = 0xFFFFFFFF,
326                         .dst_ip = 0xFFFFFFFF,
327                 },
328                 .ipv6_mask     = {
329                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
330                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
331                 },
332                 .src_port_mask = 0xFFFF,
333                 .dst_port_mask = 0xFFFF,
334                 .mac_addr_byte_mask = 0xFF,
335                 .tunnel_type_mask = 1,
336                 .tunnel_id_mask = 0xFFFFFFFF,
337         },
338         .drop_queue = 127,
339 };
340
341 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
342
343 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
344 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
345
346 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
347 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
348
349 uint16_t nb_tx_queue_stats_mappings = 0;
350 uint16_t nb_rx_queue_stats_mappings = 0;
351
352 /*
353  * Display zero values by default for xstats
354  */
355 uint8_t xstats_hide_zero;
356
357 unsigned int num_sockets = 0;
358 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
359
360 #ifdef RTE_LIBRTE_BITRATE
361 /* Bitrate statistics */
362 struct rte_stats_bitrates *bitrate_data;
363 lcoreid_t bitrate_lcore_id;
364 uint8_t bitrate_enabled;
365 #endif
366
367 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
368 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
369
370 /* Forward function declarations */
371 static void map_port_queue_stats_mapping_registers(portid_t pi,
372                                                    struct rte_port *port);
373 static void check_all_ports_link_status(uint32_t port_mask);
374 static int eth_event_callback(portid_t port_id,
375                               enum rte_eth_event_type type,
376                               void *param, void *ret_param);
377
378 /*
379  * Check if all the ports are started.
380  * If yes, return positive value. If not, return zero.
381  */
382 static int all_ports_started(void);
383
384 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
385 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
386
387 /*
388  * Helper function to check if socket is already discovered.
389  * If yes, return positive value. If not, return zero.
390  */
391 int
392 new_socket_id(unsigned int socket_id)
393 {
394         unsigned int i;
395
396         for (i = 0; i < num_sockets; i++) {
397                 if (socket_ids[i] == socket_id)
398                         return 0;
399         }
400         return 1;
401 }
402
403 /*
404  * Setup default configuration.
405  */
406 static void
407 set_default_fwd_lcores_config(void)
408 {
409         unsigned int i;
410         unsigned int nb_lc;
411         unsigned int sock_num;
412
413         nb_lc = 0;
414         for (i = 0; i < RTE_MAX_LCORE; i++) {
415                 sock_num = rte_lcore_to_socket_id(i);
416                 if (new_socket_id(sock_num)) {
417                         if (num_sockets >= RTE_MAX_NUMA_NODES) {
418                                 rte_exit(EXIT_FAILURE,
419                                          "Total sockets greater than %u\n",
420                                          RTE_MAX_NUMA_NODES);
421                         }
422                         socket_ids[num_sockets++] = sock_num;
423                 }
424                 if (!rte_lcore_is_enabled(i))
425                         continue;
426                 if (i == rte_get_master_lcore())
427                         continue;
428                 fwd_lcores_cpuids[nb_lc++] = i;
429         }
430         nb_lcores = (lcoreid_t) nb_lc;
431         nb_cfg_lcores = nb_lcores;
432         nb_fwd_lcores = 1;
433 }
434
435 static void
436 set_def_peer_eth_addrs(void)
437 {
438         portid_t i;
439
440         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
441                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
442                 peer_eth_addrs[i].addr_bytes[5] = i;
443         }
444 }
445
446 static void
447 set_default_fwd_ports_config(void)
448 {
449         portid_t pt_id;
450         int i = 0;
451
452         RTE_ETH_FOREACH_DEV(pt_id)
453                 fwd_ports_ids[i++] = pt_id;
454
455         nb_cfg_ports = nb_ports;
456         nb_fwd_ports = nb_ports;
457 }
458
/*
 * Apply the complete default forwarding configuration:
 * lcores, peer addresses and ports, in that order.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
466
/*
 * Create the mbuf pool for one socket. Called once at init time.
 * @mbuf_seg_size: data room of each mbuf.
 * @nb_mbuf:       number of mbufs in the pool.
 * @socket_id:     NUMA socket the pool memory is allocated on.
 * Exits the program on failure.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	/* Total per-object size: mbuf header plus requested data room. */
	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		/* Anonymous-memory path: build an empty pool, back it with
		 * anonymous mappings, then run the mbuf initialisers by hand. */
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		/* populate_anon() returns the number of objects added;
		 * 0 means no memory could be mapped. */
		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

	/* The success path falls through to the label on purpose: it only
	 * inspects the final state of rte_mp. */
err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
515
516 /*
517  * Check given socket id is valid or not with NUMA mode,
518  * if valid, return 0, else return -1
519  */
520 static int
521 check_socket_id(const unsigned int socket_id)
522 {
523         static int warning_once = 0;
524
525         if (new_socket_id(socket_id)) {
526                 if (!warning_once && numa_support)
527                         printf("Warning: NUMA should be configured manually by"
528                                " using --port-numa-config and"
529                                " --ring-numa-config parameters along with"
530                                " --numa.\n");
531                 warning_once = 1;
532                 return -1;
533         }
534         return 0;
535 }
536
537 /*
538  * Get the allowed maximum number of RX queues.
539  * *pid return the port id which has minimal value of
540  * max_rx_queues in all ports.
541  */
542 queueid_t
543 get_allowed_max_nb_rxq(portid_t *pid)
544 {
545         queueid_t allowed_max_rxq = MAX_QUEUE_ID;
546         portid_t pi;
547         struct rte_eth_dev_info dev_info;
548
549         RTE_ETH_FOREACH_DEV(pi) {
550                 rte_eth_dev_info_get(pi, &dev_info);
551                 if (dev_info.max_rx_queues < allowed_max_rxq) {
552                         allowed_max_rxq = dev_info.max_rx_queues;
553                         *pid = pi;
554                 }
555         }
556         return allowed_max_rxq;
557 }
558
559 /*
560  * Check input rxq is valid or not.
561  * If input rxq is not greater than any of maximum number
562  * of RX queues of all ports, it is valid.
563  * if valid, return 0, else return -1
564  */
565 int
566 check_nb_rxq(queueid_t rxq)
567 {
568         queueid_t allowed_max_rxq;
569         portid_t pid = 0;
570
571         allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
572         if (rxq > allowed_max_rxq) {
573                 printf("Fail: input rxq (%u) can't be greater "
574                        "than max_rx_queues (%u) of port %u\n",
575                        rxq,
576                        allowed_max_rxq,
577                        pid);
578                 return -1;
579         }
580         return 0;
581 }
582
/*
 * One-time global configuration: allocates the per-lcore forwarding
 * contexts, applies the default port configuration, creates the mbuf
 * pools, and sets up the forwarding streams plus the per-lcore
 * GSO/GRO contexts. Exits the program on any allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;

	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);

	/* Start from "unset" NUMA bindings; command-line parsing may have
	 * been skipped, so force a known state. */
	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default Tx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);
		/* Drop fast-free from the Tx offloads when the device
		 * does not advertise it. */
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;

		/* Count ports per socket for NUMA-aware pool creation. */
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* Worst-case sizing: full RX + TX rings, per-lcore cache,
		 * and one burst in flight, for every possible port. */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* Fall back to the socket-0 pool when the lcore's own
		 * socket has none. */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}
726
727
728 void
729 reconfig(portid_t new_port_id, unsigned socket_id)
730 {
731         struct rte_port *port;
732
733         /* Reconfiguration of Ethernet ports. */
734         port = &ports[new_port_id];
735         rte_eth_dev_info_get(new_port_id, &port->dev_info);
736
737         /* set flag to initialize port/queue */
738         port->need_reconfig = 1;
739         port->need_reconfig_queues = 1;
740         port->socket_id = socket_id;
741
742         init_port_config();
743 }
744
745
/*
 * (Re)allocate the array of forwarding streams, one per RX/TX queue of
 * each port, after validating nb_rxq/nb_txq against every port's
 * capabilities and assigning each port a NUMA socket.
 * Returns 0 on success, -1 on invalid queue counts; exits the program
 * on allocation failure.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			/* Explicit --port-numa-config wins; otherwise ask
			 * the device which socket it is attached to. */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	/* One stream per queue; a port needs at least one queue. */
	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	/* Nothing to do if the stream count is unchanged. */
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
826
827 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
828 static void
829 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
830 {
831         unsigned int total_burst;
832         unsigned int nb_burst;
833         unsigned int burst_stats[3];
834         uint16_t pktnb_stats[3];
835         uint16_t nb_pkt;
836         int burst_percent[3];
837
838         /*
839          * First compute the total number of packet bursts and the
840          * two highest numbers of bursts of the same number of packets.
841          */
842         total_burst = 0;
843         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
844         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
845         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
846                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
847                 if (nb_burst == 0)
848                         continue;
849                 total_burst += nb_burst;
850                 if (nb_burst > burst_stats[0]) {
851                         burst_stats[1] = burst_stats[0];
852                         pktnb_stats[1] = pktnb_stats[0];
853                         burst_stats[0] = nb_burst;
854                         pktnb_stats[0] = nb_pkt;
855                 }
856         }
857         if (total_burst == 0)
858                 return;
859         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
860         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
861                burst_percent[0], (int) pktnb_stats[0]);
862         if (burst_stats[0] == total_burst) {
863                 printf("]\n");
864                 return;
865         }
866         if (burst_stats[0] + burst_stats[1] == total_burst) {
867                 printf(" + %d%% of %d pkts]\n",
868                        100 - burst_percent[0], pktnb_stats[1]);
869                 return;
870         }
871         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
872         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
873         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
874                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
875                 return;
876         }
877         printf(" + %d%% of %d pkts + %d%% of others]\n",
878                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
879 }
880 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
881
/*
 * Print the forwarding statistics of one port, using the snapshot in
 * *stats (already adjusted by the caller to cover only the last run).
 * Two layouts are used: a compact one when no per-queue stats mapping is
 * enabled on the port, and a wide, column-aligned one otherwise.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	/* Compact layout: no RX/TX queue stats mapping configured. */
	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		/* Checksum engine maintains extra per-port error counters. */
		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		/* Only show error lines when there is something to report. */
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	/* Wide layout: queue stats mapping enabled on at least one side. */
	else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/* Burst-size spread, recorded per stream, attached by the caller. */
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	/* Per-queue counters from the stats registers, when mapped. */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
964
965 static void
966 fwd_stream_stats_display(streamid_t stream_id)
967 {
968         struct fwd_stream *fs;
969         static const char *fwd_top_stats_border = "-------";
970
971         fs = fwd_streams[stream_id];
972         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
973             (fs->fwd_dropped == 0))
974                 return;
975         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
976                "TX Port=%2d/Queue=%2d %s\n",
977                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
978                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
979         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
980                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
981
982         /* if checksum mode */
983         if (cur_fwd_eng == &csum_fwd_engine) {
984                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
985                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
986         }
987
988 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
989         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
990         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
991 #endif
992 }
993
994 static void
995 flush_fwd_rx_queues(void)
996 {
997         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
998         portid_t  rxp;
999         portid_t port_id;
1000         queueid_t rxq;
1001         uint16_t  nb_rx;
1002         uint16_t  i;
1003         uint8_t   j;
1004         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1005         uint64_t timer_period;
1006
1007         /* convert to number of cycles */
1008         timer_period = rte_get_timer_hz(); /* 1 second timeout */
1009
1010         for (j = 0; j < 2; j++) {
1011                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1012                         for (rxq = 0; rxq < nb_rxq; rxq++) {
1013                                 port_id = fwd_ports_ids[rxp];
1014                                 /**
1015                                 * testpmd can stuck in the below do while loop
1016                                 * if rte_eth_rx_burst() always returns nonzero
1017                                 * packets. So timer is added to exit this loop
1018                                 * after 1sec timer expiry.
1019                                 */
1020                                 prev_tsc = rte_rdtsc();
1021                                 do {
1022                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
1023                                                 pkts_burst, MAX_PKT_BURST);
1024                                         for (i = 0; i < nb_rx; i++)
1025                                                 rte_pktmbuf_free(pkts_burst[i]);
1026
1027                                         cur_tsc = rte_rdtsc();
1028                                         diff_tsc = cur_tsc - prev_tsc;
1029                                         timer_tsc += diff_tsc;
1030                                 } while ((nb_rx > 0) &&
1031                                         (timer_tsc < timer_period));
1032                                 timer_tsc = 0;
1033                         }
1034                 }
1035                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1036         }
1037 }
1038
/*
 * Forwarding loop run on one logical core: repeatedly invoke pkt_fwd on
 * every forwarding stream assigned to this lcore until fc->stopped is
 * set (by stop_packet_forwarding() from the main lcore).
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* First stream handled by this lcore and how many follow it. */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		/* Bitrate stats are refreshed at most once per second, and
		 * only on the single lcore designated for that job. */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
						idx_port < cnt_ports;
						idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		/* Latency stats likewise run on their dedicated lcore only. */
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
1083
1084 static int
1085 start_pkt_forward_on_core(void *fwd_arg)
1086 {
1087         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1088                              cur_fwd_config.fwd_eng->packet_fwd);
1089         return 0;
1090 }
1091
1092 /*
1093  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1094  * Used to start communication flows in network loopback test configurations.
1095  */
1096 static int
1097 run_one_txonly_burst_on_core(void *fwd_arg)
1098 {
1099         struct fwd_lcore *fwd_lc;
1100         struct fwd_lcore tmp_lcore;
1101
1102         fwd_lc = (struct fwd_lcore *) fwd_arg;
1103         tmp_lcore = *fwd_lc;
1104         tmp_lcore.stopped = 1;
1105         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1106         return 0;
1107 }
1108
1109 /*
1110  * Launch packet forwarding:
1111  *     - Setup per-port forwarding context.
1112  *     - launch logical cores with their forwarding configuration.
1113  */
1114 static void
1115 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1116 {
1117         port_fwd_begin_t port_fwd_begin;
1118         unsigned int i;
1119         unsigned int lc_id;
1120         int diag;
1121
1122         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1123         if (port_fwd_begin != NULL) {
1124                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1125                         (*port_fwd_begin)(fwd_ports_ids[i]);
1126         }
1127         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1128                 lc_id = fwd_lcores_cpuids[i];
1129                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1130                         fwd_lcores[i]->stopped = 0;
1131                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1132                                                      fwd_lcores[i], lc_id);
1133                         if (diag != 0)
1134                                 printf("launch lcore %u failed - diag=%d\n",
1135                                        lc_id, diag);
1136                 }
1137         }
1138 }
1139
/*
 * Launch packet forwarding configuration.
 * Validates that the selected forwarding mode has the queues it needs,
 * checks global preconditions, snapshots port stats and clears stream
 * counters, optionally runs tx_first bursts, then launches the real
 * forwarding loops on the worker lcores.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	/* Reject mode/queue combinations that cannot possibly forward:
	 * rxonly needs Rx queues, txonly needs Tx queues, and every other
	 * engine needs both. */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done == 0 means a forwarding run is already in progress. */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	/* DCB mode requires every forwarding port to be DCB-configured and
	 * more than one forwarding core. */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	/* Drop stale packets sitting in Rx queues unless disabled. */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	/* Snapshot current HW stats per port so the next stop can report
	 * only the traffic of this run. */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Reset all per-stream software counters. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	/* tx_first: emit with_tx_first single TXONLY bursts to kick off
	 * traffic in loopback setups before real forwarding starts. */
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1249
/*
 * Stop the current forwarding run: signal all worker lcores to stop,
 * wait for them, let the engine finalize each port, then aggregate and
 * display per-stream, per-port and accumulated statistics. Finally
 * marks the test as done so a new run can start.
 */
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	/* Ask every forwarding lcore to leave its loop, then wait. */
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	/* Per-port teardown hook of the current forwarding engine. */
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	/* Fold per-stream counters into the owning ports. With more streams
	 * than ports the per-stream stats are printed individually and the
	 * port/stream links cleared; otherwise each port keeps a reference
	 * to its single stream for later burst-stats display. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	/* Subtract the snapshot taken at start_packet_forwarding() so the
	 * displayed counters cover only this run, and zero the baselines. */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	/* Accumulated totals across all forwarding ports. */
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
1387
1388 void
1389 dev_set_link_up(portid_t pid)
1390 {
1391         if (rte_eth_dev_set_link_up(pid) < 0)
1392                 printf("\nSet link up fail.\n");
1393 }
1394
1395 void
1396 dev_set_link_down(portid_t pid)
1397 {
1398         if (rte_eth_dev_set_link_down(pid) < 0)
1399                 printf("\nSet link down fail.\n");
1400 }
1401
1402 static int
1403 all_ports_started(void)
1404 {
1405         portid_t pi;
1406         struct rte_port *port;
1407
1408         RTE_ETH_FOREACH_DEV(pi) {
1409                 port = &ports[pi];
1410                 /* Check if there is a port which is not started */
1411                 if ((port->port_status != RTE_PORT_STARTED) &&
1412                         (port->slave_flag == 0))
1413                         return 0;
1414         }
1415
1416         /* No port is not started */
1417         return 1;
1418 }
1419
1420 int
1421 port_is_stopped(portid_t port_id)
1422 {
1423         struct rte_port *port = &ports[port_id];
1424
1425         if ((port->port_status != RTE_PORT_STOPPED) &&
1426             (port->slave_flag == 0))
1427                 return 0;
1428         return 1;
1429 }
1430
1431 int
1432 all_ports_stopped(void)
1433 {
1434         portid_t pi;
1435
1436         RTE_ETH_FOREACH_DEV(pi) {
1437                 if (!port_is_stopped(pi))
1438                         return 0;
1439         }
1440
1441         return 1;
1442 }
1443
1444 int
1445 port_is_started(portid_t port_id)
1446 {
1447         if (port_id_is_invalid(port_id, ENABLED_WARN))
1448                 return 0;
1449
1450         if (ports[port_id].port_status != RTE_PORT_STARTED)
1451                 return 0;
1452
1453         return 1;
1454 }
1455
1456 static int
1457 port_is_closed(portid_t port_id)
1458 {
1459         if (port_id_is_invalid(port_id, ENABLED_WARN))
1460                 return 0;
1461
1462         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1463                 return 0;
1464
1465         return 1;
1466 }
1467
1468 int
1469 start_port(portid_t pid)
1470 {
1471         int diag, need_check_link_status = -1;
1472         portid_t pi;
1473         queueid_t qi;
1474         struct rte_port *port;
1475         struct ether_addr mac_addr;
1476         enum rte_eth_event_type event_type;
1477
1478         if (port_id_is_invalid(pid, ENABLED_WARN))
1479                 return 0;
1480
1481         if(dcb_config)
1482                 dcb_test = 1;
1483         RTE_ETH_FOREACH_DEV(pi) {
1484                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1485                         continue;
1486
1487                 need_check_link_status = 0;
1488                 port = &ports[pi];
1489                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1490                                                  RTE_PORT_HANDLING) == 0) {
1491                         printf("Port %d is now not stopped\n", pi);
1492                         continue;
1493                 }
1494
1495                 if (port->need_reconfig > 0) {
1496                         port->need_reconfig = 0;
1497
1498                         if (flow_isolate_all) {
1499                                 int ret = port_flow_isolate(pi, 1);
1500                                 if (ret) {
1501                                         printf("Failed to apply isolated"
1502                                                " mode on port %d\n", pi);
1503                                         return -1;
1504                                 }
1505                         }
1506
1507                         printf("Configuring Port %d (socket %u)\n", pi,
1508                                         port->socket_id);
1509                         /* configure port */
1510                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1511                                                 &(port->dev_conf));
1512                         if (diag != 0) {
1513                                 if (rte_atomic16_cmpset(&(port->port_status),
1514                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1515                                         printf("Port %d can not be set back "
1516                                                         "to stopped\n", pi);
1517                                 printf("Fail to configure port %d\n", pi);
1518                                 /* try to reconfigure port next time */
1519                                 port->need_reconfig = 1;
1520                                 return -1;
1521                         }
1522                 }
1523                 if (port->need_reconfig_queues > 0) {
1524                         port->need_reconfig_queues = 0;
1525                         port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
1526                         /* Apply Tx offloads configuration */
1527                         port->tx_conf.offloads = port->dev_conf.txmode.offloads;
1528                         /* setup tx queues */
1529                         for (qi = 0; qi < nb_txq; qi++) {
1530                                 if ((numa_support) &&
1531                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1532                                         diag = rte_eth_tx_queue_setup(pi, qi,
1533                                                 nb_txd,txring_numa[pi],
1534                                                 &(port->tx_conf));
1535                                 else
1536                                         diag = rte_eth_tx_queue_setup(pi, qi,
1537                                                 nb_txd,port->socket_id,
1538                                                 &(port->tx_conf));
1539
1540                                 if (diag == 0)
1541                                         continue;
1542
1543                                 /* Fail to setup tx queue, return */
1544                                 if (rte_atomic16_cmpset(&(port->port_status),
1545                                                         RTE_PORT_HANDLING,
1546                                                         RTE_PORT_STOPPED) == 0)
1547                                         printf("Port %d can not be set back "
1548                                                         "to stopped\n", pi);
1549                                 printf("Fail to configure port %d tx queues\n", pi);
1550                                 /* try to reconfigure queues next time */
1551                                 port->need_reconfig_queues = 1;
1552                                 return -1;
1553                         }
1554                         /* Apply Rx offloads configuration */
1555                         port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
1556                         /* setup rx queues */
1557                         for (qi = 0; qi < nb_rxq; qi++) {
1558                                 if ((numa_support) &&
1559                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1560                                         struct rte_mempool * mp =
1561                                                 mbuf_pool_find(rxring_numa[pi]);
1562                                         if (mp == NULL) {
1563                                                 printf("Failed to setup RX queue:"
1564                                                         "No mempool allocation"
1565                                                         " on the socket %d\n",
1566                                                         rxring_numa[pi]);
1567                                                 return -1;
1568                                         }
1569
1570                                         diag = rte_eth_rx_queue_setup(pi, qi,
1571                                              nb_rxd,rxring_numa[pi],
1572                                              &(port->rx_conf),mp);
1573                                 } else {
1574                                         struct rte_mempool *mp =
1575                                                 mbuf_pool_find(port->socket_id);
1576                                         if (mp == NULL) {
1577                                                 printf("Failed to setup RX queue:"
1578                                                         "No mempool allocation"
1579                                                         " on the socket %d\n",
1580                                                         port->socket_id);
1581                                                 return -1;
1582                                         }
1583                                         diag = rte_eth_rx_queue_setup(pi, qi,
1584                                              nb_rxd,port->socket_id,
1585                                              &(port->rx_conf), mp);
1586                                 }
1587                                 if (diag == 0)
1588                                         continue;
1589
1590                                 /* Fail to setup rx queue, return */
1591                                 if (rte_atomic16_cmpset(&(port->port_status),
1592                                                         RTE_PORT_HANDLING,
1593                                                         RTE_PORT_STOPPED) == 0)
1594                                         printf("Port %d can not be set back "
1595                                                         "to stopped\n", pi);
1596                                 printf("Fail to configure port %d rx queues\n", pi);
1597                                 /* try to reconfigure queues next time */
1598                                 port->need_reconfig_queues = 1;
1599                                 return -1;
1600                         }
1601                 }
1602
1603                 /* start port */
1604                 if (rte_eth_dev_start(pi) < 0) {
1605                         printf("Fail to start port %d\n", pi);
1606
1607                         /* Fail to setup rx queue, return */
1608                         if (rte_atomic16_cmpset(&(port->port_status),
1609                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1610                                 printf("Port %d can not be set back to "
1611                                                         "stopped\n", pi);
1612                         continue;
1613                 }
1614
1615                 if (rte_atomic16_cmpset(&(port->port_status),
1616                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1617                         printf("Port %d can not be set into started\n", pi);
1618
1619                 rte_eth_macaddr_get(pi, &mac_addr);
1620                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1621                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1622                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1623                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1624
1625                 /* at least one port started, need checking link status */
1626                 need_check_link_status = 1;
1627         }
1628
1629         for (event_type = RTE_ETH_EVENT_UNKNOWN;
1630              event_type < RTE_ETH_EVENT_MAX;
1631              event_type++) {
1632                 diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1633                                                 event_type,
1634                                                 eth_event_callback,
1635                                                 NULL);
1636                 if (diag) {
1637                         printf("Failed to setup even callback for event %d\n",
1638                                 event_type);
1639                         return -1;
1640                 }
1641         }
1642
1643         if (need_check_link_status == 1 && !no_link_check)
1644                 check_all_ports_link_status(RTE_PORT_ALL);
1645         else if (need_check_link_status == 0)
1646                 printf("Please stop the ports first\n");
1647
1648         printf("Done\n");
1649         return 0;
1650 }
1651
1652 void
1653 stop_port(portid_t pid)
1654 {
1655         portid_t pi;
1656         struct rte_port *port;
1657         int need_check_link_status = 0;
1658
1659         if (dcb_test) {
1660                 dcb_test = 0;
1661                 dcb_config = 0;
1662         }
1663
1664         if (port_id_is_invalid(pid, ENABLED_WARN))
1665                 return;
1666
1667         printf("Stopping ports...\n");
1668
1669         RTE_ETH_FOREACH_DEV(pi) {
1670                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1671                         continue;
1672
1673                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1674                         printf("Please remove port %d from forwarding configuration.\n", pi);
1675                         continue;
1676                 }
1677
1678                 if (port_is_bonding_slave(pi)) {
1679                         printf("Please remove port %d from bonded device.\n", pi);
1680                         continue;
1681                 }
1682
1683                 port = &ports[pi];
1684                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1685                                                 RTE_PORT_HANDLING) == 0)
1686                         continue;
1687
1688                 rte_eth_dev_stop(pi);
1689
1690                 if (rte_atomic16_cmpset(&(port->port_status),
1691                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1692                         printf("Port %d can not be set into stopped\n", pi);
1693                 need_check_link_status = 1;
1694         }
1695         if (need_check_link_status && !no_link_check)
1696                 check_all_ports_link_status(RTE_PORT_ALL);
1697
1698         printf("Done\n");
1699 }
1700
1701 void
1702 close_port(portid_t pid)
1703 {
1704         portid_t pi;
1705         struct rte_port *port;
1706
1707         if (port_id_is_invalid(pid, ENABLED_WARN))
1708                 return;
1709
1710         printf("Closing ports...\n");
1711
1712         RTE_ETH_FOREACH_DEV(pi) {
1713                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1714                         continue;
1715
1716                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1717                         printf("Please remove port %d from forwarding configuration.\n", pi);
1718                         continue;
1719                 }
1720
1721                 if (port_is_bonding_slave(pi)) {
1722                         printf("Please remove port %d from bonded device.\n", pi);
1723                         continue;
1724                 }
1725
1726                 port = &ports[pi];
1727                 if (rte_atomic16_cmpset(&(port->port_status),
1728                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1729                         printf("Port %d is already closed\n", pi);
1730                         continue;
1731                 }
1732
1733                 if (rte_atomic16_cmpset(&(port->port_status),
1734                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1735                         printf("Port %d is now not stopped\n", pi);
1736                         continue;
1737                 }
1738
1739                 if (port->flow_list)
1740                         port_flow_flush(pi);
1741                 rte_eth_dev_close(pi);
1742
1743                 if (rte_atomic16_cmpset(&(port->port_status),
1744                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1745                         printf("Port %d cannot be set to closed\n", pi);
1746         }
1747
1748         printf("Done\n");
1749 }
1750
1751 void
1752 reset_port(portid_t pid)
1753 {
1754         int diag;
1755         portid_t pi;
1756         struct rte_port *port;
1757
1758         if (port_id_is_invalid(pid, ENABLED_WARN))
1759                 return;
1760
1761         printf("Resetting ports...\n");
1762
1763         RTE_ETH_FOREACH_DEV(pi) {
1764                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1765                         continue;
1766
1767                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1768                         printf("Please remove port %d from forwarding "
1769                                "configuration.\n", pi);
1770                         continue;
1771                 }
1772
1773                 if (port_is_bonding_slave(pi)) {
1774                         printf("Please remove port %d from bonded device.\n",
1775                                pi);
1776                         continue;
1777                 }
1778
1779                 diag = rte_eth_dev_reset(pi);
1780                 if (diag == 0) {
1781                         port = &ports[pi];
1782                         port->need_reconfig = 1;
1783                         port->need_reconfig_queues = 1;
1784                 } else {
1785                         printf("Failed to reset port %d. diag=%d\n", pi, diag);
1786                 }
1787         }
1788
1789         printf("Done\n");
1790 }
1791
1792 void
1793 attach_port(char *identifier)
1794 {
1795         portid_t pi = 0;
1796         unsigned int socket_id;
1797
1798         printf("Attaching a new port...\n");
1799
1800         if (identifier == NULL) {
1801                 printf("Invalid parameters are specified\n");
1802                 return;
1803         }
1804
1805         if (rte_eth_dev_attach(identifier, &pi))
1806                 return;
1807
1808         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1809         /* if socket_id is invalid, set to 0 */
1810         if (check_socket_id(socket_id) < 0)
1811                 socket_id = 0;
1812         reconfig(pi, socket_id);
1813         rte_eth_promiscuous_enable(pi);
1814
1815         nb_ports = rte_eth_dev_count();
1816
1817         ports[pi].port_status = RTE_PORT_STOPPED;
1818
1819         printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1820         printf("Done\n");
1821 }
1822
1823 void
1824 detach_port(portid_t port_id)
1825 {
1826         char name[RTE_ETH_NAME_MAX_LEN];
1827
1828         printf("Detaching a port...\n");
1829
1830         if (!port_is_closed(port_id)) {
1831                 printf("Please close port first\n");
1832                 return;
1833         }
1834
1835         if (ports[port_id].flow_list)
1836                 port_flow_flush(port_id);
1837
1838         if (rte_eth_dev_detach(port_id, name)) {
1839                 TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1840                 return;
1841         }
1842
1843         nb_ports = rte_eth_dev_count();
1844
1845         printf("Port '%s' is detached. Now total ports is %d\n",
1846                         name, nb_ports);
1847         printf("Done\n");
1848         return;
1849 }
1850
1851 void
1852 pmd_test_exit(void)
1853 {
1854         portid_t pt_id;
1855
1856         if (test_done == 0)
1857                 stop_packet_forwarding();
1858
1859         if (ports != NULL) {
1860                 no_link_check = 1;
1861                 RTE_ETH_FOREACH_DEV(pt_id) {
1862                         printf("\nShutting down port %d...\n", pt_id);
1863                         fflush(stdout);
1864                         stop_port(pt_id);
1865                         close_port(pt_id);
1866                 }
1867         }
1868         printf("\nBye...\n");
1869 }
1870
/* Signature of a parameterless test-menu command handler. */
typedef void (*cmd_func_t)(void);
/* One entry of the PMD test menu: command name plus its handler. */
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

/* Number of entries in pmd_test_menu (defined elsewhere in this file). */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1878
1879 /* Check the link status of all ports in up to 9s, and print them finally */
1880 static void
1881 check_all_ports_link_status(uint32_t port_mask)
1882 {
1883 #define CHECK_INTERVAL 100 /* 100ms */
1884 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1885         portid_t portid;
1886         uint8_t count, all_ports_up, print_flag = 0;
1887         struct rte_eth_link link;
1888
1889         printf("Checking link statuses...\n");
1890         fflush(stdout);
1891         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1892                 all_ports_up = 1;
1893                 RTE_ETH_FOREACH_DEV(portid) {
1894                         if ((port_mask & (1 << portid)) == 0)
1895                                 continue;
1896                         memset(&link, 0, sizeof(link));
1897                         rte_eth_link_get_nowait(portid, &link);
1898                         /* print link status if flag set */
1899                         if (print_flag == 1) {
1900                                 if (link.link_status)
1901                                         printf(
1902                                         "Port%d Link Up. speed %u Mbps- %s\n",
1903                                         portid, link.link_speed,
1904                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1905                                         ("full-duplex") : ("half-duplex\n"));
1906                                 else
1907                                         printf("Port %d Link Down\n", portid);
1908                                 continue;
1909                         }
1910                         /* clear all_ports_up flag if any link down */
1911                         if (link.link_status == ETH_LINK_DOWN) {
1912                                 all_ports_up = 0;
1913                                 break;
1914                         }
1915                 }
1916                 /* after finally printing all link status, get out */
1917                 if (print_flag == 1)
1918                         break;
1919
1920                 if (all_ports_up == 0) {
1921                         fflush(stdout);
1922                         rte_delay_ms(CHECK_INTERVAL);
1923                 }
1924
1925                 /* set the print_flag if all ports up or timeout */
1926                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1927                         print_flag = 1;
1928                 }
1929
1930                 if (lsc_interrupt)
1931                         break;
1932         }
1933 }
1934
1935 static void
1936 rmv_event_callback(void *arg)
1937 {
1938         struct rte_eth_dev *dev;
1939         portid_t port_id = (intptr_t)arg;
1940
1941         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1942         dev = &rte_eth_devices[port_id];
1943
1944         stop_port(port_id);
1945         close_port(port_id);
1946         printf("removing device %s\n", dev->device->name);
1947         if (rte_eal_dev_detach(dev->device))
1948                 TESTPMD_LOG(ERR, "Failed to detach device %s\n",
1949                         dev->device->name);
1950 }
1951
1952 /* This function is used by the interrupt thread */
1953 static int
1954 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1955                   void *ret_param)
1956 {
1957         static const char * const event_desc[] = {
1958                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1959                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1960                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1961                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1962                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1963                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1964                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1965                 [RTE_ETH_EVENT_NEW] = "device probed",
1966                 [RTE_ETH_EVENT_DESTROY] = "device released",
1967                 [RTE_ETH_EVENT_MAX] = NULL,
1968         };
1969
1970         RTE_SET_USED(param);
1971         RTE_SET_USED(ret_param);
1972
1973         if (type >= RTE_ETH_EVENT_MAX) {
1974                 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1975                         port_id, __func__, type);
1976                 fflush(stderr);
1977         } else if (event_print_mask & (UINT32_C(1) << type)) {
1978                 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1979                         event_desc[type]);
1980                 fflush(stdout);
1981         }
1982
1983         switch (type) {
1984         case RTE_ETH_EVENT_INTR_RMV:
1985                 if (rte_eal_alarm_set(100000,
1986                                 rmv_event_callback, (void *)(intptr_t)port_id))
1987                         fprintf(stderr, "Could not set up deferred device removal\n");
1988                 break;
1989         default:
1990                 break;
1991         }
1992         return 0;
1993 }
1994
1995 static int
1996 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1997 {
1998         uint16_t i;
1999         int diag;
2000         uint8_t mapping_found = 0;
2001
2002         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2003                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2004                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2005                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2006                                         tx_queue_stats_mappings[i].queue_id,
2007                                         tx_queue_stats_mappings[i].stats_counter_id);
2008                         if (diag != 0)
2009                                 return diag;
2010                         mapping_found = 1;
2011                 }
2012         }
2013         if (mapping_found)
2014                 port->tx_queue_stats_mapping_enabled = 1;
2015         return 0;
2016 }
2017
2018 static int
2019 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2020 {
2021         uint16_t i;
2022         int diag;
2023         uint8_t mapping_found = 0;
2024
2025         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2026                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2027                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2028                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2029                                         rx_queue_stats_mappings[i].queue_id,
2030                                         rx_queue_stats_mappings[i].stats_counter_id);
2031                         if (diag != 0)
2032                                 return diag;
2033                         mapping_found = 1;
2034                 }
2035         }
2036         if (mapping_found)
2037                 port->rx_queue_stats_mapping_enabled = 1;
2038         return 0;
2039 }
2040
2041 static void
2042 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2043 {
2044         int diag = 0;
2045
2046         diag = set_tx_queue_stats_mapping_registers(pi, port);
2047         if (diag != 0) {
2048                 if (diag == -ENOTSUP) {
2049                         port->tx_queue_stats_mapping_enabled = 0;
2050                         printf("TX queue stats mapping not supported port id=%d\n", pi);
2051                 }
2052                 else
2053                         rte_exit(EXIT_FAILURE,
2054                                         "set_tx_queue_stats_mapping_registers "
2055                                         "failed for port id=%d diag=%d\n",
2056                                         pi, diag);
2057         }
2058
2059         diag = set_rx_queue_stats_mapping_registers(pi, port);
2060         if (diag != 0) {
2061                 if (diag == -ENOTSUP) {
2062                         port->rx_queue_stats_mapping_enabled = 0;
2063                         printf("RX queue stats mapping not supported port id=%d\n", pi);
2064                 }
2065                 else
2066                         rte_exit(EXIT_FAILURE,
2067                                         "set_rx_queue_stats_mapping_registers "
2068                                         "failed for port id=%d diag=%d\n",
2069                                         pi, diag);
2070         }
2071 }
2072
/*
 * Initialize a port's RX/TX queue configuration from the PMD defaults,
 * then override each field for which the user passed a command-line
 * value (RTE_PMD_PARAM_UNSET marks "not supplied").
 */
static void
rxtx_port_config(struct rte_port *port)
{
	/* Start from the defaults the driver reported in dev_info. */
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;
}
2110
/*
 * Build the default device configuration for every probed port:
 * flow-director settings, RSS (enabled only with multiple RX queues),
 * RX/TX queue parameters, LSC/RMV interrupt flags and, when compiled
 * in, ixgbe bypass and softnic specifics.
 */
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.fdir_conf = fdir_conf;
		/* RSS only makes sense with more than one RX queue. */
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		/* Outside DCB mode, pick the RX multi-queue mode from the
		 * RSS hash setting chosen above.
		 */
		if (port->dcb_flag == 0) {
			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		/* Enable LSC/RMV interrupts only when both the user asked
		 * for them and the device advertises support.
		 */
		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;

#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
		/* Detect softnic port */
		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
			port->softnic_enable = 1;
			memset(&port->softport, 0, sizeof(struct softnic_port));

			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
				port->softport.tm_flag = 1;
		}
#endif
	}
}
2165
2166 void set_port_slave_flag(portid_t slave_pid)
2167 {
2168         struct rte_port *port;
2169
2170         port = &ports[slave_pid];
2171         port->slave_flag = 1;
2172 }
2173
2174 void clear_port_slave_flag(portid_t slave_pid)
2175 {
2176         struct rte_port *port;
2177
2178         port = &ports[slave_pid];
2179         port->slave_flag = 0;
2180 }
2181
2182 uint8_t port_is_bonding_slave(portid_t slave_pid)
2183 {
2184         struct rte_port *port;
2185
2186         port = &ports[slave_pid];
2187         return port->slave_flag;
2188 }
2189
/* VLAN ids mapped to VMDQ pools by get_eth_dcb_conf() in DCB+VT mode. */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
2196
/*
 * Fill eth_conf with a DCB configuration. In DCB_VT_ENABLED mode the
 * VMDQ+DCB pools are mapped from the vlan_tags table above; otherwise a
 * plain DCB(+RSS) setup with num_tcs traffic classes is built.
 * pfc_en additionally enables priority flow control. Always returns 0.
 */
static  int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		/* 4 TCs -> 32 pools, otherwise (8 TCs) -> 16 pools. */
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		/* Spread the vlan_tags round-robin over the pools. */
		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		/* Map each user priority onto a traffic class. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		/* Map each user priority onto a traffic class. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
2263
/*
 * Put one port into DCB mode: build the DCB device configuration,
 * apply it (with zero queues so queues get set up later), derive the
 * global nb_rxq/nb_txq from the device capabilities, and install the
 * vlan_tags VLAN filters. Returns 0 on success, negative on error.
 */
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;

	/**
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be setup.
	 */
	rte_eth_dev_configure(pid, 0, 0, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and has the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			/* VFs consume part of the queues: use the counts the
			 * device reports as currently configured.
			 */
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/*if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;

		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
2345
2346 static void
2347 init_port(void)
2348 {
2349         /* Configuration of Ethernet ports. */
2350         ports = rte_zmalloc("testpmd: ports",
2351                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2352                             RTE_CACHE_LINE_SIZE);
2353         if (ports == NULL) {
2354                 rte_exit(EXIT_FAILURE,
2355                                 "rte_zmalloc(%d struct rte_port) failed\n",
2356                                 RTE_MAX_ETHPORTS);
2357         }
2358 }
2359
/* Shut down all ports, then terminate the interactive prompt. */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
2366
2367 static void
2368 print_stats(void)
2369 {
2370         uint8_t i;
2371         const char clr[] = { 27, '[', '2', 'J', '\0' };
2372         const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2373
2374         /* Clear screen and move to top left */
2375         printf("%s%s", clr, top_left);
2376
2377         printf("\nPort statistics ====================================");
2378         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2379                 nic_stats_display(fwd_ports_ids[i]);
2380 }
2381
/*
 * SIGINT/SIGTERM handler: uninitialize the optional capture/latency
 * frameworks, tear down ports via force_quit(), flag the forwarding
 * loop to stop, then re-raise the signal under the default disposition
 * so the process exits with the conventional signal status.
 */
static void
signal_handler(int signum)
{
        if (signum == SIGINT || signum == SIGTERM) {
                printf("\nSignal %d received, preparing to exit...\n",
                                signum);
#ifdef RTE_LIBRTE_PDUMP
                /* uninitialize packet capture framework */
                rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
                rte_latencystats_uninit();
#endif
                force_quit();
                /* Set flag to indicate the force termination. */
                f_quit = 1;
                /* exit with the expected status */
                signal(signum, SIG_DFL);
                kill(getpid(), signum);
        }
}
2403
/*
 * testpmd entry point: initialize the EAL, probe Ethernet devices,
 * parse application-level arguments, configure and start all ports,
 * then either run the interactive command line or forward packets
 * until the user (or a signal) stops the application.
 */
int
main(int argc, char** argv)
{
        int  diag;
        portid_t port_id;

        /* Install handlers early so Ctrl-C during init still cleans up. */
        signal(SIGINT, signal_handler);
        signal(SIGTERM, signal_handler);

        diag = rte_eal_init(argc, argv);
        if (diag < 0)
                rte_panic("Cannot init EAL\n");

        testpmd_logtype = rte_log_register("testpmd");
        if (testpmd_logtype < 0)
                rte_panic("Cannot register log type");
        rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

        /* Pin current and future pages into RAM; failure is non-fatal. */
        if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
                TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
                        strerror(errno));
        }

#ifdef RTE_LIBRTE_PDUMP
        /* initialize packet capture framework */
        rte_pdump_init(NULL);
#endif

        nb_ports = (portid_t) rte_eth_dev_count();
        if (nb_ports == 0)
                TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

        /* allocate port structures, and init them */
        init_port();

        set_def_fwd_config();
        if (nb_lcores == 0)
                rte_panic("Empty set of forwarding logical cores - check the "
                          "core mask supplied in the command parameters\n");

        /* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
        bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
        latencystats_enabled = 0;
#endif

        /* Skip past the EAL arguments consumed by rte_eal_init() above. */
        argc -= diag;
        argv += diag;
        if (argc > 1)
                launch_args_parse(argc, argv);

        if (tx_first && interactive)
                rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
                                "interactive mode.\n");

        /* tx_first transmits before links are necessarily up; link state
         * change interrupts would interfere, so fall back to polling. */
        if (tx_first && lsc_interrupt) {
                printf("Warning: lsc_interrupt needs to be off when "
                                " using tx_first. Disabling.\n");
                lsc_interrupt = 0;
        }

        if (!nb_rxq && !nb_txq)
                printf("Warning: Either rx or tx queues should be non-zero\n");

        if (nb_rxq > 1 && nb_rxq > nb_txq)
                printf("Warning: nb_rxq=%d enables RSS configuration, "
                       "but nb_txq=%d will prevent to fully test it.\n",
                       nb_rxq, nb_txq);

        init_config();
        if (start_port(RTE_PORT_ALL) != 0)
                rte_exit(EXIT_FAILURE, "Start ports failed\n");

        /* set all ports to promiscuous mode by default */
        RTE_ETH_FOREACH_DEV(port_id)
                rte_eth_promiscuous_enable(port_id);

        /* Init metrics library */
        rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
        if (latencystats_enabled != 0) {
                int ret = rte_latencystats_init(1, NULL);
                if (ret)
                        printf("Warning: latencystats init()"
                                " returned error %d\n", ret);
                printf("Latencystats running on lcore %d\n",
                        latencystats_lcore_id);
        }
#endif

        /* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
        if (bitrate_enabled != 0) {
                bitrate_data = rte_stats_bitrate_create();
                if (bitrate_data == NULL)
                        rte_exit(EXIT_FAILURE,
                                "Could not allocate bitrate data.\n");
                rte_stats_bitrate_reg(bitrate_data);
        }
#endif

#ifdef RTE_LIBRTE_CMDLINE
        /* Replay commands from a file before (optionally) going interactive. */
        if (strlen(cmdline_filename) != 0)
                cmdline_read_from_file(cmdline_filename);

        if (interactive == 1) {
                if (auto_start) {
                        printf("Start automatic packet forwarding\n");
                        start_packet_forwarding(0);
                }
                prompt();
                pmd_test_exit();
        } else
#endif
        {
                char c;
                int rc;

                f_quit = 0;

                printf("No commandline core given, start packet forwarding\n");
                start_packet_forwarding(tx_first);
                /* Periodically print statistics when --stats-period is set. */
                if (stats_period != 0) {
                        uint64_t prev_time = 0, cur_time, diff_time = 0;
                        uint64_t timer_period;

                        /* Convert to number of cycles */
                        timer_period = stats_period * rte_get_timer_hz();

                        /* f_quit is raised by signal_handler() on SIGINT/SIGTERM. */
                        while (f_quit == 0) {
                                cur_time = rte_get_timer_cycles();
                                diff_time += cur_time - prev_time;

                                if (diff_time >= timer_period) {
                                        print_stats();
                                        /* Reset the timer */
                                        diff_time = 0;
                                }
                                /* Sleep to avoid unnecessary checks */
                                prev_time = cur_time;
                                sleep(1);
                        }
                }

                /* Block on stdin so the ports stay up until the user quits. */
                printf("Press enter to exit\n");
                rc = read(0, &c, 1);
                pmd_test_exit();
                if (rc < 0)
                        return 1;
        }

        return 0;
}