/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_malloc_heap.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
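/* usually selected at startup via the --mp-alloc command-line option
 * (assuming this testpmd build provides that option)
 */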
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;

/*
 * Store the specified sockets on which the memory pool used by each port
 * is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX ring used by each port
 * is allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX ring used by each port
 * is allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;        /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
        &io_fwd_engine,
        &mac_fwd_engine,
        &mac_swap_engine,
        &flow_gen_engine,
        &rx_only_engine,
        &tx_only_engine,
        &csum_fwd_engine,
        &icmp_echo_engine,
        &noisy_vnf_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
        &softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
        &ieee1588_fwd_engine,
#endif
        NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, it is not possible to terminate a process running with
 * the 'stats-period' option. Set a flag to exit the stats-period loop
 * after receiving SIGINT/SIGTERM.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
        TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* whether the current configuration is in DCB mode; 0 means it is not */
uint8_t dcb_config = 0;

/* whether DCB is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
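/* RTE_PMD_PARAM_UNSET (-1) means "not set on the command line": the
 * defaults reported by the driver through rte_eth_dev_info_get() are
 * used instead.
 */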
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/*
 * Display or mask ether events.
 * Default to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
                            (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
                            (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
                            (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
                            (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
                            (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
                            (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to service latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
        .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
};

struct rte_eth_txmode tx_mode = {
        .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
        .mode = RTE_FDIR_MODE_NONE,
        .pballoc = RTE_FDIR_PBALLOC_64K,
        .status = RTE_FDIR_REPORT_STATUS,
        .mask = {
                .vlan_tci_mask = 0xFFEF,
                .ipv4_mask     = {
                        .src_ip = 0xFFFFFFFF,
                        .dst_ip = 0xFFFFFFFF,
                },
                .ipv6_mask     = {
                        .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
                        .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
                },
                .src_port_mask = 0xFFFF,
                .dst_port_mask = 0xFFFF,
                .mac_addr_byte_mask = 0xFF,
                .tunnel_type_mask = 1,
                .tunnel_id_mask = 0xFFFFFFFF,
        },
        .drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

struct vxlan_encap_conf vxlan_encap_conf = {
        .select_ipv4 = 1,
        .select_vlan = 0,
        .vni = "\x00\x00\x00",
        .udp_src = 0,
        .udp_dst = RTE_BE16(4789),
        .ipv4_src = IPv4(127, 0, 0, 1),
        .ipv4_dst = IPv4(255, 255, 255, 255),
        .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
                "\x00\x00\x00\x00\x00\x00\x00\x01",
        .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
                "\x00\x00\x00\x00\x00\x00\x11\x11",
        .vlan_tci = 0,
        .eth_src = "\x00\x00\x00\x00\x00\x00",
        .eth_dst = "\xff\xff\xff\xff\xff\xff",
};

struct nvgre_encap_conf nvgre_encap_conf = {
        .select_ipv4 = 1,
        .select_vlan = 0,
        .tni = "\x00\x00\x00",
        .ipv4_src = IPv4(127, 0, 0, 1),
        .ipv4_dst = IPv4(255, 255, 255, 255),
        .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
                "\x00\x00\x00\x00\x00\x00\x00\x01",
        .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
                "\x00\x00\x00\x00\x00\x00\x11\x11",
        .vlan_tci = 0,
        .eth_src = "\x00\x00\x00\x00\x00\x00",
        .eth_dst = "\xff\xff\xff\xff\xff\xff",
};

/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void map_port_queue_stats_mapping_registers(portid_t pi,
                                                   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
                              enum rte_eth_event_type type,
                              void *param, void *ret_param);
static void eth_dev_event_callback(const char *device_name,
                                enum rte_dev_event_type type,
                                void *param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
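/* default GSO segment size: a full Ethernet frame minus CRC (1518 - 4 bytes) */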
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check if a socket is new (not yet discovered).
 * Return a positive value if the socket is new, zero if it has already
 * been discovered.
 */
int
new_socket_id(unsigned int socket_id)
{
        unsigned int i;

        for (i = 0; i < num_sockets; i++) {
                if (socket_ids[i] == socket_id)
                        return 0;
        }
        return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
        unsigned int i;
        unsigned int nb_lc;
        unsigned int sock_num;

        nb_lc = 0;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (!rte_lcore_is_enabled(i))
                        continue;
                sock_num = rte_lcore_to_socket_id(i);
                if (new_socket_id(sock_num)) {
                        if (num_sockets >= RTE_MAX_NUMA_NODES) {
                                rte_exit(EXIT_FAILURE,
                                         "Total sockets greater than %u\n",
                                         RTE_MAX_NUMA_NODES);
                        }
                        socket_ids[num_sockets++] = sock_num;
                }
                if (i == rte_get_master_lcore())
                        continue;
                fwd_lcores_cpuids[nb_lc++] = i;
        }
        nb_lcores = (lcoreid_t) nb_lc;
        nb_cfg_lcores = nb_lcores;
        nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
        portid_t i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
                peer_eth_addrs[i].addr_bytes[5] = i;
        }
}

static void
set_default_fwd_ports_config(void)
{
        portid_t pt_id;
        int i = 0;

        RTE_ETH_FOREACH_DEV(pt_id) {
                fwd_ports_ids[i++] = pt_id;

                /* Update sockets info according to the attached device */
                int socket_id = rte_eth_dev_socket_id(pt_id);
                if (socket_id >= 0 && new_socket_id(socket_id)) {
                        if (num_sockets >= RTE_MAX_NUMA_NODES) {
                                rte_exit(EXIT_FAILURE,
                                         "Total sockets greater than %u\n",
                                         RTE_MAX_NUMA_NODES);
                        }
                        socket_ids[num_sockets++] = socket_id;
                }
        }

        nb_cfg_ports = nb_ports;
        nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
        set_default_fwd_lcores_config();
        set_def_peer_eth_addrs();
        set_default_fwd_ports_config();
}

/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
        unsigned int n_pages, mbuf_per_pg, leftover;
        uint64_t total_mem, mbuf_mem, obj_sz;

        /* there is no good way to predict how much space the mempool will
         * occupy because it will allocate chunks on the fly, and some of those
         * will come from default DPDK memory while some will come from our
         * external memory, so just assume 128MB will be enough for everyone.
         */
        uint64_t hdr_mem = 128 << 20;

        /* account for possible non-contiguousness */
        obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
        if (obj_sz > pgsz) {
                TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
                return -1;
        }

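        /* compute the page count, rounding up when the mbufs do not fill
         * the last page completely
         */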
        mbuf_per_pg = pgsz / obj_sz;
        leftover = (nb_mbufs % mbuf_per_pg) > 0;
        n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

        mbuf_mem = n_pages * pgsz;

        total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

        if (total_mem > SIZE_MAX) {
                TESTPMD_LOG(ERR, "Memory size too big\n");
                return -1;
        }
        *out = (size_t)total_mem;

        return 0;
}

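/* index of the least significant set bit (bit scan forward) */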
static inline uint32_t
bsf64(uint64_t v)
{
        return (uint32_t)__builtin_ctzll(v);
}

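/* ceil(log2(v)): rte_align64pow2() first rounds v up to a power of two */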
static inline uint32_t
log2_u64(uint64_t v)
{
        if (v == 0)
                return 0;
        v = rte_align64pow2(v);
        return bsf64(v);
}

static int
pagesz_flags(uint64_t page_sz)
{
        /* as per mmap() manpage, all page sizes are log2 of page size
         * shifted by MAP_HUGE_SHIFT
         */
        int log2 = log2_u64(page_sz);

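        /* e.g. 2 MB pages give log2 == 21, which matches Linux's
         * MAP_HUGE_2MB (21 << MAP_HUGE_SHIFT)
         */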
        return (log2 << HUGE_SHIFT);
}

static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
        void *addr;
        int flags;

        /* allocate anonymous hugepages */
        flags = MAP_ANONYMOUS | MAP_PRIVATE;
        if (huge)
                flags |= HUGE_FLAG | pagesz_flags(pgsz);

        addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
        if (addr == MAP_FAILED)
                return NULL;

        return addr;
}

struct extmem_param {
        void *addr;
        size_t len;
        size_t pgsz;
        rte_iova_t *iova_table;
        unsigned int iova_table_len;
};

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
                bool huge)
{
        uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
                        RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
        unsigned int cur_page, n_pages, pgsz_idx;
        size_t mem_sz, cur_pgsz;
        rte_iova_t *iovas = NULL;
        void *addr;
        int ret;

        for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
                /* skip anything that is too big */
                if (pgsizes[pgsz_idx] > SIZE_MAX)
                        continue;

                cur_pgsz = pgsizes[pgsz_idx];

                /* if we were told not to allocate hugepages, override */
                if (!huge)
                        cur_pgsz = sysconf(_SC_PAGESIZE);

                ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
                if (ret < 0) {
                        TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
                        return -1;
                }

                /* allocate our memory */
                addr = alloc_mem(mem_sz, cur_pgsz, huge);

                /* if we couldn't allocate memory with a specified page size,
                 * that doesn't mean we can't do it with other page sizes, so
                 * try another one.
                 */
                if (addr == NULL)
                        continue;

                /* store IOVA addresses for every page in this memory area */
                n_pages = mem_sz / cur_pgsz;

                iovas = malloc(sizeof(*iovas) * n_pages);

                if (iovas == NULL) {
                        TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
                        goto fail;
                }
                /* lock memory if it's not huge pages */
                if (!huge)
                        mlock(addr, mem_sz);

                /* populate IOVA addresses */
                for (cur_page = 0; cur_page < n_pages; cur_page++) {
                        rte_iova_t iova;
                        size_t offset;
                        void *cur;

                        offset = cur_pgsz * cur_page;
                        cur = RTE_PTR_ADD(addr, offset);

                        /* touch the page before getting its IOVA */
                        *(volatile char *)cur = 0;

                        iova = rte_mem_virt2iova(cur);

                        iovas[cur_page] = iova;
                }

                break;
        }
        /* if we couldn't allocate anything */
        if (iovas == NULL)
                return -1;

        param->addr = addr;
        param->len = mem_sz;
        param->pgsz = cur_pgsz;
        param->iova_table = iovas;
        param->iova_table_len = n_pages;

        return 0;
fail:
        if (iovas)
                free(iovas);
        if (addr)
                munmap(addr, mem_sz);

        return -1;
}

static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
        struct extmem_param param;
        int socket_id, ret;

        memset(&param, 0, sizeof(param));

        /* check if our heap exists */
        socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
        if (socket_id < 0) {
                /* create our heap */
                ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
                if (ret < 0) {
                        TESTPMD_LOG(ERR, "Cannot create heap\n");
                        return -1;
                }
        }

        ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
        if (ret < 0) {
                TESTPMD_LOG(ERR, "Cannot create memory area\n");
                return -1;
        }

        /* we now have a valid memory area, so add it to heap */
        ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
                        param.addr, param.len, param.iova_table,
                        param.iova_table_len, param.pgsz);

        /* when using VFIO, memory is automatically mapped for DMA by EAL */

        /* not needed any more */
        free(param.iova_table);

        if (ret < 0) {
                TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
                munmap(param.addr, param.len);
                return -1;
        }

        /* success */

        TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
                        param.len >> 20);

        return 0;
}

/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
                 unsigned int socket_id)
{
        char pool_name[RTE_MEMPOOL_NAMESIZE];
        struct rte_mempool *rte_mp = NULL;
        uint32_t mb_size;

        mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
        mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

        TESTPMD_LOG(INFO,
                "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
                pool_name, nb_mbuf, mbuf_seg_size, socket_id);

        switch (mp_alloc_type) {
        case MP_ALLOC_NATIVE:
                {
                        /* wrapper to rte_mempool_create() */
                        TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
                                        rte_mbuf_best_mempool_ops());
                        rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
                                mb_mempool_cache, 0, mbuf_seg_size, socket_id);
                        break;
                }
        case MP_ALLOC_ANON:
                {
                        rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
                                mb_size, (unsigned int) mb_mempool_cache,
                                sizeof(struct rte_pktmbuf_pool_private),
                                socket_id, 0);
                        if (rte_mp == NULL)
                                goto err;

                        if (rte_mempool_populate_anon(rte_mp) == 0) {
                                rte_mempool_free(rte_mp);
                                rte_mp = NULL;
                                goto err;
                        }
                        rte_pktmbuf_pool_init(rte_mp, NULL);
                        rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
                        break;
                }
        case MP_ALLOC_XMEM:
        case MP_ALLOC_XMEM_HUGE:
                {
                        int heap_socket;
                        bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

                        if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
                                rte_exit(EXIT_FAILURE, "Could not create external memory\n");

                        heap_socket =
                                rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
                        if (heap_socket < 0)
                                rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

                        TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
                                        rte_mbuf_best_mempool_ops());
                        rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
                                        mb_mempool_cache, 0, mbuf_seg_size,
                                        heap_socket);
                        break;
                }
        default:
                {
                        rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
                }
        }

err:
        if (rte_mp == NULL) {
                rte_exit(EXIT_FAILURE,
                        "Creation of mbuf pool for socket %u failed: %s\n",
                        socket_id, rte_strerror(rte_errno));
        } else if (verbose_level > 0) {
                rte_mempool_dump(stdout, rte_mp);
        }
}

/*
 * Check whether the given socket id is valid in NUMA mode.
 * If valid, return 0; otherwise return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
        static int warning_once = 0;

        if (new_socket_id(socket_id)) {
                if (!warning_once && numa_support)
                        printf("Warning: NUMA should be configured manually by"
                               " using --port-numa-config and"
                               " --ring-numa-config parameters along with"
                               " --numa.\n");
                warning_once = 1;
                return -1;
        }
        return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
        queueid_t allowed_max_rxq = MAX_QUEUE_ID;
        portid_t pi;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_FOREACH_DEV(pi) {
                rte_eth_dev_info_get(pi, &dev_info);
                if (dev_info.max_rx_queues < allowed_max_rxq) {
                        allowed_max_rxq = dev_info.max_rx_queues;
                        *pid = pi;
                }
        }
        return allowed_max_rxq;
}

/*
 * Check whether the input rxq is valid.
 * It is valid if it does not exceed the smallest max_rx_queues
 * value among all ports.
 * If valid, return 0; otherwise return -1.
 */
int
check_nb_rxq(queueid_t rxq)
{
        queueid_t allowed_max_rxq;
        portid_t pid = 0;

        allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
        if (rxq > allowed_max_rxq) {
                printf("Fail: input rxq (%u) can't be greater "
                       "than max_rx_queues (%u) of port %u\n",
                       rxq,
                       allowed_max_rxq,
                       pid);
                return -1;
        }
        return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
        queueid_t allowed_max_txq = MAX_QUEUE_ID;
        portid_t pi;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_FOREACH_DEV(pi) {
                rte_eth_dev_info_get(pi, &dev_info);
                if (dev_info.max_tx_queues < allowed_max_txq) {
                        allowed_max_txq = dev_info.max_tx_queues;
                        *pid = pi;
                }
        }
        return allowed_max_txq;
}

/*
 * Check whether the input txq is valid.
 * It is valid if it does not exceed the smallest max_tx_queues
 * value among all ports.
 * If valid, return 0; otherwise return -1.
 */
int
check_nb_txq(queueid_t txq)
{
        queueid_t allowed_max_txq;
        portid_t pid = 0;

        allowed_max_txq = get_allowed_max_nb_txq(&pid);
        if (txq > allowed_max_txq) {
                printf("Fail: input txq (%u) can't be greater "
                       "than max_tx_queues (%u) of port %u\n",
                       txq,
                       allowed_max_txq,
                       pid);
                return -1;
        }
        return 0;
}

static void
init_config(void)
{
        portid_t pid;
        struct rte_port *port;
        struct rte_mempool *mbp;
        unsigned int nb_mbuf_per_pool;
        lcoreid_t  lc_id;
        uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
        struct rte_gro_param gro_param;
        uint32_t gso_types;
        int k;

        memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

        /* Configuration of logical cores. */
        fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
                                sizeof(struct fwd_lcore *) * nb_lcores,
                                RTE_CACHE_LINE_SIZE);
        if (fwd_lcores == NULL) {
                rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
                                                        "failed\n", nb_lcores);
        }
        for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
                fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
                                               sizeof(struct fwd_lcore),
                                               RTE_CACHE_LINE_SIZE);
                if (fwd_lcores[lc_id] == NULL) {
                        rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
                                                                "failed\n");
                }
                fwd_lcores[lc_id]->cpuid_idx = lc_id;
        }

        RTE_ETH_FOREACH_DEV(pid) {
                port = &ports[pid];
                /* Apply default TxRx configuration for all ports */
                port->dev_conf.txmode = tx_mode;
                port->dev_conf.rxmode = rx_mode;
                rte_eth_dev_info_get(pid, &port->dev_info);

                if (!(port->dev_info.tx_offload_capa &
                      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
                        port->dev_conf.txmode.offloads &=
                                ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
                if (!(port->dev_info.tx_offload_capa &
                        DEV_TX_OFFLOAD_MATCH_METADATA))
                        port->dev_conf.txmode.offloads &=
                                ~DEV_TX_OFFLOAD_MATCH_METADATA;
                if (numa_support) {
                        if (port_numa[pid] != NUMA_NO_CONFIG)
                                port_per_socket[port_numa[pid]]++;
                        else {
                                uint32_t socket_id = rte_eth_dev_socket_id(pid);

                                /*
                                 * if socket_id is invalid,
                                 * set to the first available socket.
                                 */
                                if (check_socket_id(socket_id) < 0)
                                        socket_id = socket_ids[0];
                                port_per_socket[socket_id]++;
                        }
                }

                /* Apply Rx offloads configuration */
                for (k = 0; k < port->dev_info.max_rx_queues; k++)
                        port->rx_conf[k].offloads =
                                port->dev_conf.rxmode.offloads;
                /* Apply Tx offloads configuration */
                for (k = 0; k < port->dev_info.max_tx_queues; k++)
                        port->tx_conf[k].offloads =
                                port->dev_conf.txmode.offloads;

                /* set flag to initialize port/queue */
                port->need_reconfig = 1;
                port->need_reconfig_queues = 1;
                port->tx_metadata = 0;
        }

        /*
         * Create pools of mbuf.
         * If NUMA support is disabled, create a single pool of mbuf in
         * socket 0 memory by default.
         * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
         *
         * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
         * nb_txd can be configured at run time.
         */
        if (param_total_num_mbufs)
                nb_mbuf_per_pool = param_total_num_mbufs;
        else {
                nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
                        (nb_lcores * mb_mempool_cache) +
                        RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
                nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
        }

        if (numa_support) {
                uint8_t i;

                for (i = 0; i < num_sockets; i++)
                        mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
                                         socket_ids[i]);
        } else {
                if (socket_num == UMA_NO_CONFIG)
                        mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
                else
                        mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
                                                 socket_num);
        }

        init_port_config();

        gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
        /*
         * Record which mbuf pool each logical core should use, if needed.
         */
        for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
                mbp = mbuf_pool_find(
                        rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

                if (mbp == NULL)
                        mbp = mbuf_pool_find(0);
                fwd_lcores[lc_id]->mbp = mbp;
                /* initialize GSO context */
                fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
                fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
                fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
                fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
                        ETHER_CRC_LEN;
                fwd_lcores[lc_id]->gso_ctx.flag = 0;
        }

        /* Configuration of packet forwarding streams. */
        if (init_fwd_streams() < 0)
                rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

        fwd_config_setup();

        /* create a gro context for each lcore */
        gro_param.gro_types = RTE_GRO_TCP_IPV4;
        gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
        gro_param.max_item_per_flow = MAX_PKT_BURST;
        for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
                gro_param.socket_id = rte_lcore_to_socket_id(
                                fwd_lcores_cpuids[lc_id]);
                fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
                if (fwd_lcores[lc_id]->gro_ctx == NULL) {
                        rte_exit(EXIT_FAILURE,
                                        "rte_gro_ctx_create() failed\n");
                }
        }

#if defined RTE_LIBRTE_PMD_SOFTNIC
        if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
                RTE_ETH_FOREACH_DEV(pid) {
                        port = &ports[pid];
                        const char *driver = port->dev_info.driver_name;

                        if (strcmp(driver, "net_softnic") == 0)
                                port->softport.fwd_lcore_arg = fwd_lcores;
                }
        }
#endif

}


void
reconfig(portid_t new_port_id, unsigned socket_id)
{
        struct rte_port *port;

        /* Reconfiguration of Ethernet ports. */
        port = &ports[new_port_id];
        rte_eth_dev_info_get(new_port_id, &port->dev_info);

        /* set flag to initialize port/queue */
        port->need_reconfig = 1;
        port->need_reconfig_queues = 1;
        port->socket_id = socket_id;

        init_port_config();
}


int
init_fwd_streams(void)
{
        portid_t pid;
        struct rte_port *port;
        streamid_t sm_id, nb_fwd_streams_new;
        queueid_t q;

        /* set socket id according to numa or not */
        RTE_ETH_FOREACH_DEV(pid) {
                port = &ports[pid];
                if (nb_rxq > port->dev_info.max_rx_queues) {
                        printf("Fail: nb_rxq(%d) is greater than "
                                "max_rx_queues(%d)\n", nb_rxq,
                                port->dev_info.max_rx_queues);
                        return -1;
                }
                if (nb_txq > port->dev_info.max_tx_queues) {
                        printf("Fail: nb_txq(%d) is greater than "
                                "max_tx_queues(%d)\n", nb_txq,
                                port->dev_info.max_tx_queues);
                        return -1;
                }
                if (numa_support) {
                        if (port_numa[pid] != NUMA_NO_CONFIG)
                                port->socket_id = port_numa[pid];
                        else {
                                port->socket_id = rte_eth_dev_socket_id(pid);

                                /*
                                 * if socket_id is invalid,
                                 * set to the first available socket.
                                 */
                                if (check_socket_id(port->socket_id) < 0)
                                        port->socket_id = socket_ids[0];
                        }
                } else {
                        if (socket_num == UMA_NO_CONFIG)
                                port->socket_id = 0;
                        else
                                port->socket_id = socket_num;
                }
        }

        q = RTE_MAX(nb_rxq, nb_txq);
        if (q == 0) {
                printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
                return -1;
        }
        nb_fwd_streams_new = (streamid_t)(nb_ports * q);
        if (nb_fwd_streams_new == nb_fwd_streams)
                return 0;
        /* clear the old */
        if (fwd_streams != NULL) {
                for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
                        if (fwd_streams[sm_id] == NULL)
                                continue;
                        rte_free(fwd_streams[sm_id]);
                        fwd_streams[sm_id] = NULL;
                }
                rte_free(fwd_streams);
                fwd_streams = NULL;
        }

        /* init new */
        nb_fwd_streams = nb_fwd_streams_new;
        if (nb_fwd_streams) {
                fwd_streams = rte_zmalloc("testpmd: fwd_streams",
                        sizeof(struct fwd_stream *) * nb_fwd_streams,
                        RTE_CACHE_LINE_SIZE);
                if (fwd_streams == NULL)
                        rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
                                 " (struct fwd_stream *)) failed\n",
                                 nb_fwd_streams);

                for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
                        fwd_streams[sm_id] = rte_zmalloc("testpmd:"
                                " struct fwd_stream", sizeof(struct fwd_stream),
                                RTE_CACHE_LINE_SIZE);
                        if (fwd_streams[sm_id] == NULL)
                                rte_exit(EXIT_FAILURE, "rte_zmalloc"
                                         "(struct fwd_stream) failed\n");
                }
        }

        return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
        unsigned int total_burst;
        unsigned int nb_burst;
        unsigned int burst_stats[3];
        uint16_t pktnb_stats[3];
        uint16_t nb_pkt;
        int burst_percent[3];

        /*
         * First compute the total number of packet bursts and the
         * two highest numbers of bursts of the same number of packets.
         */
        total_burst = 0;
        burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
        pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
        for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
                nb_burst = pbs->pkt_burst_spread[nb_pkt];
                if (nb_burst == 0)
                        continue;
                total_burst += nb_burst;
                if (nb_burst > burst_stats[0]) {
                        burst_stats[1] = burst_stats[0];
                        pktnb_stats[1] = pktnb_stats[0];
                        burst_stats[0] = nb_burst;
                        pktnb_stats[0] = nb_pkt;
                } else if (nb_burst > burst_stats[1]) {
                        burst_stats[1] = nb_burst;
                        pktnb_stats[1] = nb_pkt;
                }
        }
        if (total_burst == 0)
                return;
        burst_percent[0] = (burst_stats[0] * 100) / total_burst;
        printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
               burst_percent[0], (int) pktnb_stats[0]);
        if (burst_stats[0] == total_burst) {
                printf("]\n");
                return;
        }
        if (burst_stats[0] + burst_stats[1] == total_burst) {
                printf(" + %d%% of %d pkts]\n",
                       100 - burst_percent[0], pktnb_stats[1]);
                return;
        }
        burst_percent[1] = (burst_stats[1] * 100) / total_burst;
        burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
        if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
                printf(" + %d%% of others]\n", 100 - burst_percent[0]);
                return;
        }
        printf(" + %d%% of %d pkts + %d%% of others]\n",
               burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
        struct rte_port *port;
        uint8_t i;

        static const char *fwd_stats_border = "----------------------";

        port = &ports[port_id];
        printf("\n  %s Forward statistics for port %-2d %s\n",
               fwd_stats_border, port_id, fwd_stats_border);

        if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
                printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
                       "%-"PRIu64"\n",
                       stats->ipackets, stats->imissed,
                       (uint64_t) (stats->ipackets + stats->imissed));

                if (cur_fwd_eng == &csum_fwd_engine)
                        printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n",
                               port->rx_bad_ip_csum, port->rx_bad_l4_csum,
                               port->rx_bad_outer_l4_csum);
                if ((stats->ierrors + stats->rx_nombuf) > 0) {
                        printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
                        printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
                }

                printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
                       "%-"PRIu64"\n",
                       stats->opackets, port->tx_dropped,
                       (uint64_t) (stats->opackets + port->tx_dropped));
        } else {
                printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
                       "%14"PRIu64"\n",
                       stats->ipackets, stats->imissed,
                       (uint64_t) (stats->ipackets + stats->imissed));

                if (cur_fwd_eng == &csum_fwd_engine)
                        printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"    Bad-outer-l4csum: %-14"PRIu64"\n",
                               port->rx_bad_ip_csum, port->rx_bad_l4_csum,
                               port->rx_bad_outer_l4_csum);
                if ((stats->ierrors + stats->rx_nombuf) > 0) {
                        printf("  RX-error:%"PRIu64"\n", stats->ierrors);
                        printf("  RX-nombufs:             %14"PRIu64"\n",
                               stats->rx_nombuf);
                }

                printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
                       "%14"PRIu64"\n",
                       stats->opackets, port->tx_dropped,
                       (uint64_t) (stats->opackets + port->tx_dropped));
        }

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
        if (port->rx_stream)
                pkt_burst_stats_display("RX",
                        &port->rx_stream->rx_burst_stats);
        if (port->tx_stream)
                pkt_burst_stats_display("TX",
                        &port->tx_stream->tx_burst_stats);
#endif

        if (port->rx_queue_stats_mapping_enabled) {
                printf("\n");
                for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
                        printf("  Stats reg %2d RX-packets:%14"PRIu64
                               "     RX-errors:%14"PRIu64
                               "    RX-bytes:%14"PRIu64"\n",
                               i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
                }
                printf("\n");
        }
        if (port->tx_queue_stats_mapping_enabled) {
                for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
                        printf("  Stats reg %2d TX-packets:%14"PRIu64
                               "                                 TX-bytes:%14"PRIu64"\n",
                               i, stats->q_opackets[i], stats->q_obytes[i]);
                }
        }

        printf("  %s--------------------------------%s\n",
               fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
        struct fwd_stream *fs;
        static const char *fwd_top_stats_border = "-------";

        fs = fwd_streams[stream_id];
        if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
            (fs->fwd_dropped == 0))
                return;
        printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
               "TX Port=%2d/Queue=%2d %s\n",
               fwd_top_stats_border, fs->rx_port, fs->rx_queue,
               fs->tx_port, fs->tx_queue, fwd_top_stats_border);
        printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
               fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

        /* if checksum mode */
        if (cur_fwd_eng == &csum_fwd_engine) {
               printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
                        "%-14u Rx- bad outer L4 checksum: %-14u\n",
                        fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
                        fs->rx_bad_outer_l4_csum);
        }

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
        pkt_burst_stats_display("RX", &fs->rx_burst_stats);
        pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
        struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
        portid_t  rxp;
        portid_t port_id;
        queueid_t rxq;
        uint16_t  nb_rx;
        uint16_t  i;
        uint8_t   j;
        uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
        uint64_t timer_period;

        /* convert to number of cycles */
        timer_period = rte_get_timer_hz(); /* 1 second timeout */

        for (j = 0; j < 2; j++) {
                for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
                        for (rxq = 0; rxq < nb_rxq; rxq++) {
                                port_id = fwd_ports_ids[rxp];
                                /*
                                 * testpmd can get stuck in the do-while loop
                                 * below if rte_eth_rx_burst() always returns
                                 * a nonzero number of packets, so a timer is
                                 * used to exit the loop once the 1-second
                                 * timer expires.
                                 */
1499                                 prev_tsc = rte_rdtsc();
1500                                 do {
1501                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
1502                                                 pkts_burst, MAX_PKT_BURST);
1503                                         for (i = 0; i < nb_rx; i++)
1504                                                 rte_pktmbuf_free(pkts_burst[i]);
1505
1506                                         cur_tsc = rte_rdtsc();
1507                                         diff_tsc = cur_tsc - prev_tsc;
1508                                         timer_tsc += diff_tsc;
1509                                 } while ((nb_rx > 0) &&
1510                                         (timer_tsc < timer_period));
1511                                 timer_tsc = 0;
1512                         }
1513                 }
1514                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1515         }
1516 }
1517
1518 static void
1519 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1520 {
1521         struct fwd_stream **fsm;
1522         streamid_t nb_fs;
1523         streamid_t sm_id;
1524 #ifdef RTE_LIBRTE_BITRATE
1525         uint64_t tics_per_1sec;
1526         uint64_t tics_datum;
1527         uint64_t tics_current;
1528         uint16_t i, cnt_ports;
1529
1530         cnt_ports = nb_ports;
1531         tics_datum = rte_rdtsc();
1532         tics_per_1sec = rte_get_timer_hz();
1533 #endif
1534         fsm = &fwd_streams[fc->stream_idx];
1535         nb_fs = fc->stream_nb;
1536         do {
1537                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1538                         (*pkt_fwd)(fsm[sm_id]);
1539 #ifdef RTE_LIBRTE_BITRATE
1540                 if (bitrate_enabled != 0 &&
1541                                 bitrate_lcore_id == rte_lcore_id()) {
1542                         tics_current = rte_rdtsc();
1543                         if (tics_current - tics_datum >= tics_per_1sec) {
1544                                 /* Periodic bitrate calculation */
1545                                 for (i = 0; i < cnt_ports; i++)
1546                                         rte_stats_bitrate_calc(bitrate_data,
1547                                                 ports_ids[i]);
1548                                 tics_datum = tics_current;
1549                         }
1550                 }
1551 #endif
1552 #ifdef RTE_LIBRTE_LATENCY_STATS
1553                 if (latencystats_enabled != 0 &&
1554                                 latencystats_lcore_id == rte_lcore_id())
1555                         rte_latencystats_update();
1556 #endif
1557
1558         } while (!fc->stopped);
1559 }
1560
1561 static int
1562 start_pkt_forward_on_core(void *fwd_arg)
1563 {
1564         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1565                              cur_fwd_config.fwd_eng->packet_fwd);
1566         return 0;
1567 }
1568
1569 /*
1570  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1571  * Used to start communication flows in network loopback test configurations.
1572  */
1573 static int
1574 run_one_txonly_burst_on_core(void *fwd_arg)
1575 {
1576         struct fwd_lcore *fwd_lc;
1577         struct fwd_lcore tmp_lcore;
1578
1579         fwd_lc = (struct fwd_lcore *) fwd_arg;
1580         tmp_lcore = *fwd_lc;
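             /*
              * Marking the local copy as stopped makes the do-while loop in
              * run_pkt_fwd_on_lcore() execute exactly once, i.e. each stream
              * sends a single burst.
              */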
1581         tmp_lcore.stopped = 1;
1582         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1583         return 0;
1584 }
1585
1586 /*
1587  * Launch packet forwarding:
1588  *     - Set up the per-port forwarding context.
1589  *     - Launch the logical cores with their forwarding configuration.
1590  */
1591 static void
1592 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1593 {
1594         port_fwd_begin_t port_fwd_begin;
1595         unsigned int i;
1596         unsigned int lc_id;
1597         int diag;
1598
1599         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1600         if (port_fwd_begin != NULL) {
1601                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1602                         (*port_fwd_begin)(fwd_ports_ids[i]);
1603         }
1604         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1605                 lc_id = fwd_lcores_cpuids[i];
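                     /*
                      * In interactive mode the master lcore runs the command
                      * line, so packet forwarding is launched only on the
                      * other lcores.
                      */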
1606                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1607                         fwd_lcores[i]->stopped = 0;
1608                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1609                                                      fwd_lcores[i], lc_id);
1610                         if (diag != 0)
1611                                 printf("launch lcore %u failed - diag=%d\n",
1612                                        lc_id, diag);
1613                 }
1614         }
1615 }
1616
1617 /*
1618  * Launch packet forwarding configuration.
1619  */
1620 void
1621 start_packet_forwarding(int with_tx_first)
1622 {
1623         port_fwd_begin_t port_fwd_begin;
1624         port_fwd_end_t  port_fwd_end;
1625         struct rte_port *port;
1626         unsigned int i;
1627         portid_t   pt_id;
1628         streamid_t sm_id;
1629
1630         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1631                 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1632
1633         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1634                 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1635
1636         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1637                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1638                 (!nb_rxq || !nb_txq))
1639                 rte_exit(EXIT_FAILURE,
1640                         "Either rxq or txq is 0, cannot use %s fwd mode\n",
1641                         cur_fwd_eng->fwd_mode_name);
1642
1643         if (all_ports_started() == 0) {
1644                 printf("Not all ports were started\n");
1645                 return;
1646         }
1647         if (test_done == 0) {
1648                 printf("Packet forwarding already started\n");
1649                 return;
1650         }
1651
1652
1653         if (dcb_test) {
1654                 for (i = 0; i < nb_fwd_ports; i++) {
1655                         pt_id = fwd_ports_ids[i];
1656                         port = &ports[pt_id];
1657                         if (!port->dcb_flag) {
1658                                 printf("In DCB mode, all forwarding ports must "
1659                                        "be configured in this mode.\n");
1660                                 return;
1661                         }
1662                 }
1663                 if (nb_fwd_lcores == 1) {
1664                         printf("In DCB mode, the number of forwarding cores "
1665                                "should be larger than 1.\n");
1666                         return;
1667                 }
1668         }
1669         test_done = 0;
1670
1671         fwd_config_setup();
1672
1673         if (!no_flush_rx)
1674                 flush_fwd_rx_queues();
1675
1676         pkt_fwd_config_display(&cur_fwd_config);
1677         rxtx_config_display();
1678
1679         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1680                 pt_id = fwd_ports_ids[i];
1681                 port = &ports[pt_id];
1682                 rte_eth_stats_get(pt_id, &port->stats);
1683                 port->tx_dropped = 0;
1684
1685                 map_port_queue_stats_mapping_registers(pt_id, port);
1686         }
1687         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1688                 fwd_streams[sm_id]->rx_packets = 0;
1689                 fwd_streams[sm_id]->tx_packets = 0;
1690                 fwd_streams[sm_id]->fwd_dropped = 0;
1691                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1692                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1693                 fwd_streams[sm_id]->rx_bad_outer_l4_csum = 0;
1694
1695 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1696                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1697                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1698                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1699                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1700 #endif
1701 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1702                 fwd_streams[sm_id]->core_cycles = 0;
1703 #endif
1704         }
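             /*
              * With --tx-first, run the txonly engine for the requested
              * number of initial bursts before launching the configured
              * forwarding engine.
              */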
1705         if (with_tx_first) {
1706                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1707                 if (port_fwd_begin != NULL) {
1708                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1709                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1710                 }
1711                 while (with_tx_first--) {
1712                         launch_packet_forwarding(
1713                                         run_one_txonly_burst_on_core);
1714                         rte_eal_mp_wait_lcore();
1715                 }
1716                 port_fwd_end = tx_only_engine.port_fwd_end;
1717                 if (port_fwd_end != NULL) {
1718                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1719                                 (*port_fwd_end)(fwd_ports_ids[i]);
1720                 }
1721         }
1722         launch_packet_forwarding(start_pkt_forward_on_core);
1723 }
1724
1725 void
1726 stop_packet_forwarding(void)
1727 {
1728         struct rte_eth_stats stats;
1729         struct rte_port *port;
1730         port_fwd_end_t  port_fwd_end;
1731         int i;
1732         portid_t   pt_id;
1733         streamid_t sm_id;
1734         lcoreid_t  lc_id;
1735         uint64_t total_recv;
1736         uint64_t total_xmit;
1737         uint64_t total_rx_dropped;
1738         uint64_t total_tx_dropped;
1739         uint64_t total_rx_nombuf;
1740         uint64_t tx_dropped;
1741         uint64_t rx_bad_ip_csum;
1742         uint64_t rx_bad_l4_csum;
1743 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1744         uint64_t fwd_cycles;
1745 #endif
1746
1747         static const char *acc_stats_border = "+++++++++++++++";
1748
1749         if (test_done) {
1750                 printf("Packet forwarding not started\n");
1751                 return;
1752         }
1753         printf("Telling cores to stop...");
1754         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1755                 fwd_lcores[lc_id]->stopped = 1;
1756         printf("\nWaiting for lcores to finish...\n");
1757         rte_eal_mp_wait_lcore();
1758         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1759         if (port_fwd_end != NULL) {
1760                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1761                         pt_id = fwd_ports_ids[i];
1762                         (*port_fwd_end)(pt_id);
1763                 }
1764         }
1765 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1766         fwd_cycles = 0;
1767 #endif
1768         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
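                     /*
                      * Per-stream stats are shown only when there is more
                      * than one stream per port, presumably to avoid
                      * duplicating the per-port stats displayed below.
                      */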
1769                 if (cur_fwd_config.nb_fwd_streams >
1770                     cur_fwd_config.nb_fwd_ports) {
1771                         fwd_stream_stats_display(sm_id);
1772                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1773                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1774                 } else {
1775                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1776                                 fwd_streams[sm_id];
1777                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1778                                 fwd_streams[sm_id];
1779                 }
1780                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1781                 tx_dropped = (uint64_t) (tx_dropped +
1782                                          fwd_streams[sm_id]->fwd_dropped);
1783                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1784
1785                 rx_bad_ip_csum =
1786                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1787                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1788                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1789                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1790                                                         rx_bad_ip_csum;
1791
1792                 rx_bad_l4_csum =
1793                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1794                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1795                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1796                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1797                                                         rx_bad_l4_csum;
1798
1799                 ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum +=
1800                                 fwd_streams[sm_id]->rx_bad_outer_l4_csum;
1801
1802 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1803                 fwd_cycles = (uint64_t) (fwd_cycles +
1804                                          fwd_streams[sm_id]->core_cycles);
1805 #endif
1806         }
1807         total_recv = 0;
1808         total_xmit = 0;
1809         total_rx_dropped = 0;
1810         total_tx_dropped = 0;
1811         total_rx_nombuf  = 0;
1812         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1813                 pt_id = fwd_ports_ids[i];
1814
1815                 port = &ports[pt_id];
1816                 rte_eth_stats_get(pt_id, &stats);
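                     /*
                      * Subtract the snapshot taken in start_packet_forwarding()
                      * so that only the traffic of this forwarding run is
                      * reported.
                      */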
1817                 stats.ipackets -= port->stats.ipackets;
1818                 port->stats.ipackets = 0;
1819                 stats.opackets -= port->stats.opackets;
1820                 port->stats.opackets = 0;
1821                 stats.ibytes   -= port->stats.ibytes;
1822                 port->stats.ibytes = 0;
1823                 stats.obytes   -= port->stats.obytes;
1824                 port->stats.obytes = 0;
1825                 stats.imissed  -= port->stats.imissed;
1826                 port->stats.imissed = 0;
1827                 stats.oerrors  -= port->stats.oerrors;
1828                 port->stats.oerrors = 0;
1829                 stats.rx_nombuf -= port->stats.rx_nombuf;
1830                 port->stats.rx_nombuf = 0;
1831
1832                 total_recv += stats.ipackets;
1833                 total_xmit += stats.opackets;
1834                 total_rx_dropped += stats.imissed;
1835                 total_tx_dropped += port->tx_dropped;
1836                 total_rx_nombuf  += stats.rx_nombuf;
1837
1838                 fwd_port_stats_display(pt_id, &stats);
1839         }
1840
1841         printf("\n  %s Accumulated forward statistics for all ports"
1842                "%s\n",
1843                acc_stats_border, acc_stats_border);
1844         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1845                "%-"PRIu64"\n"
1846                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1847                "%-"PRIu64"\n",
1848                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1849                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1850         if (total_rx_nombuf > 0)
1851                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1852         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1853                "%s\n",
1854                acc_stats_border, acc_stats_border);
1855 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1856         if (total_recv > 0)
1857                 printf("\n  CPU cycles/packet=%u (total cycles="
1858                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1859                        (unsigned int)(fwd_cycles / total_recv),
1860                        fwd_cycles, total_recv);
1861 #endif
1862         printf("\nDone.\n");
1863         test_done = 1;
1864 }
1865
1866 void
1867 dev_set_link_up(portid_t pid)
1868 {
1869         if (rte_eth_dev_set_link_up(pid) < 0)
1870                 printf("\nSet link up failed.\n");
1871 }
1872
1873 void
1874 dev_set_link_down(portid_t pid)
1875 {
1876         if (rte_eth_dev_set_link_down(pid) < 0)
1877                 printf("\nSet link down failed.\n");
1878 }
1879
1880 static int
1881 all_ports_started(void)
1882 {
1883         portid_t pi;
1884         struct rte_port *port;
1885
1886         RTE_ETH_FOREACH_DEV(pi) {
1887                 port = &ports[pi];
1888                 /* Check if there is a port which is not started */
1889                 if ((port->port_status != RTE_PORT_STARTED) &&
1890                         (port->slave_flag == 0))
1891                         return 0;
1892         }
1893
1894         /* all ports (excluding bonding slaves) are started */
1895         return 1;
1896 }
1897
1898 int
1899 port_is_stopped(portid_t port_id)
1900 {
1901         struct rte_port *port = &ports[port_id];
1902
1903         if ((port->port_status != RTE_PORT_STOPPED) &&
1904             (port->slave_flag == 0))
1905                 return 0;
1906         return 1;
1907 }
1908
1909 int
1910 all_ports_stopped(void)
1911 {
1912         portid_t pi;
1913
1914         RTE_ETH_FOREACH_DEV(pi) {
1915                 if (!port_is_stopped(pi))
1916                         return 0;
1917         }
1918
1919         return 1;
1920 }
1921
1922 int
1923 port_is_started(portid_t port_id)
1924 {
1925         if (port_id_is_invalid(port_id, ENABLED_WARN))
1926                 return 0;
1927
1928         if (ports[port_id].port_status != RTE_PORT_STARTED)
1929                 return 0;
1930
1931         return 1;
1932 }
1933
1934 int
1935 start_port(portid_t pid)
1936 {
1937         int diag, need_check_link_status = -1;
1938         portid_t pi;
1939         queueid_t qi;
1940         struct rte_port *port;
1941         struct ether_addr mac_addr;
1942         enum rte_eth_event_type event_type;
1943
1944         if (port_id_is_invalid(pid, ENABLED_WARN))
1945                 return 0;
1946
1947         if(dcb_config)
1948                 dcb_test = 1;
1949         RTE_ETH_FOREACH_DEV(pi) {
1950                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1951                         continue;
1952
1953                 need_check_link_status = 0;
1954                 port = &ports[pi];
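                     /*
                      * Move the port atomically from STOPPED to HANDLING; if
                      * the transition fails, the port is not in the stopped
                      * state (it may be being handled elsewhere) and is
                      * skipped.
                      */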
1955                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1956                                                  RTE_PORT_HANDLING) == 0) {
1957                         printf("Port %d is not stopped, skipping it\n", pi);
1958                         continue;
1959                 }
1960
1961                 if (port->need_reconfig > 0) {
1962                         port->need_reconfig = 0;
1963
1964                         if (flow_isolate_all) {
1965                                 int ret = port_flow_isolate(pi, 1);
1966                                 if (ret) {
1967                                         printf("Failed to apply isolated"
1968                                                " mode on port %d\n", pi);
1969                                         return -1;
1970                                 }
1971                         }
1972                         configure_rxtx_dump_callbacks(0);
1973                         printf("Configuring Port %d (socket %u)\n", pi,
1974                                         port->socket_id);
1975                         /* configure port */
1976                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1977                                                 &(port->dev_conf));
1978                         if (diag != 0) {
1979                                 if (rte_atomic16_cmpset(&(port->port_status),
1980                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1981                                         printf("Port %d cannot be set back "
1982                                                         "to stopped\n", pi);
1983                                 printf("Failed to configure port %d\n", pi);
1984                                 /* try to reconfigure port next time */
1985                                 port->need_reconfig = 1;
1986                                 return -1;
1987                         }
1988                 }
1989                 if (port->need_reconfig_queues > 0) {
1990                         port->need_reconfig_queues = 0;
1991                         /* setup tx queues */
1992                         for (qi = 0; qi < nb_txq; qi++) {
1993                                 if ((numa_support) &&
1994                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1995                                         diag = rte_eth_tx_queue_setup(pi, qi,
1996                                                 port->nb_tx_desc[qi],
1997                                                 txring_numa[pi],
1998                                                 &(port->tx_conf[qi]));
1999                                 else
2000                                         diag = rte_eth_tx_queue_setup(pi, qi,
2001                                                 port->nb_tx_desc[qi],
2002                                                 port->socket_id,
2003                                                 &(port->tx_conf[qi]));
2004
2005                                 if (diag == 0)
2006                                         continue;
2007
2008                                 /* failed to set up a Tx queue; give up */
2009                                 if (rte_atomic16_cmpset(&(port->port_status),
2010                                                         RTE_PORT_HANDLING,
2011                                                         RTE_PORT_STOPPED) == 0)
2012                                         printf("Port %d cannot be set back "
2013                                                         "to stopped\n", pi);
2014                                 printf("Failed to configure port %d tx queues\n",
2015                                        pi);
2016                                 /* try to reconfigure queues next time */
2017                                 port->need_reconfig_queues = 1;
2018                                 return -1;
2019                         }
2020                         for (qi = 0; qi < nb_rxq; qi++) {
2021                                 /* setup rx queues */
2022                                 if ((numa_support) &&
2023                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2024                                         struct rte_mempool *mp =
2025                                                 mbuf_pool_find(rxring_numa[pi]);
2026                                         if (mp == NULL) {
2027                                                 printf("Failed to setup RX queue: "
2028                                                         "no mempool allocated"
2029                                                         " on socket %d\n",
2030                                                         rxring_numa[pi]);
2031                                                 return -1;
2032                                         }
2033
2034                                         diag = rte_eth_rx_queue_setup(pi, qi,
2035                                              port->nb_rx_desc[qi],
2036                                              rxring_numa[pi],
2037                                              &(port->rx_conf[qi]),
2038                                              mp);
2039                                 } else {
2040                                         struct rte_mempool *mp =
2041                                                 mbuf_pool_find(port->socket_id);
2042                                         if (mp == NULL) {
2043                                                 printf("Failed to setup RX queue: "
2044                                                         "no mempool allocated"
2045                                                         " on socket %d\n",
2046                                                         port->socket_id);
2047                                                 return -1;
2048                                         }
2049                                         diag = rte_eth_rx_queue_setup(pi, qi,
2050                                              port->nb_rx_desc[qi],
2051                                              port->socket_id,
2052                                              &(port->rx_conf[qi]),
2053                                              mp);
2054                                 }
2055                                 if (diag == 0)
2056                                         continue;
2057
2058                                 /* failed to set up an Rx queue; give up */
2059                                 if (rte_atomic16_cmpset(&(port->port_status),
2060                                                         RTE_PORT_HANDLING,
2061                                                         RTE_PORT_STOPPED) == 0)
2062                                         printf("Port %d cannot be set back "
2063                                                         "to stopped\n", pi);
2064                                 printf("Failed to configure port %d rx queues\n",
2065                                        pi);
2066                                 /* try to reconfigure queues next time */
2067                                 port->need_reconfig_queues = 1;
2068                                 return -1;
2069                         }
2070                 }
2071                 configure_rxtx_dump_callbacks(verbose_level);
2072                 /* start port */
2073                 if (rte_eth_dev_start(pi) < 0) {
2074                         printf("Failed to start port %d\n", pi);
2075
2076                         /* failed to start the port; set it back to stopped */
2077                         if (rte_atomic16_cmpset(&(port->port_status),
2078                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2079                                 printf("Port %d cannot be set back to "
2080                                                         "stopped\n", pi);
2081                         continue;
2082                 }
2083
2084                 if (rte_atomic16_cmpset(&(port->port_status),
2085                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2086                         printf("Port %d cannot be set to started\n", pi);
2087
2088                 rte_eth_macaddr_get(pi, &mac_addr);
2089                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2090                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2091                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2092                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2093
2094                 /* at least one port started, need to check link status */
2095                 need_check_link_status = 1;
2096         }
2097
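             /*
              * (Re-)register the testpmd event handler for every ethdev event
              * type on all ports; duplicate registrations across repeated
              * start_port() calls are presumably handled by the ethdev layer.
              */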
2098         for (event_type = RTE_ETH_EVENT_UNKNOWN;
2099              event_type < RTE_ETH_EVENT_MAX;
2100              event_type++) {
2101                 diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
2102                                                 event_type,
2103                                                 eth_event_callback,
2104                                                 NULL);
2105                 if (diag) {
2106                         printf("Failed to set up event callback for event %d\n",
2107                                 event_type);
2108                         return -1;
2109                 }
2110         }
2111
2112         if (need_check_link_status == 1 && !no_link_check)
2113                 check_all_ports_link_status(RTE_PORT_ALL);
2114         else if (need_check_link_status == 0)
2115                 printf("Please stop the ports first\n");
2116
2117         printf("Done\n");
2118         return 0;
2119 }
2120
2121 void
2122 stop_port(portid_t pid)
2123 {
2124         portid_t pi;
2125         struct rte_port *port;
2126         int need_check_link_status = 0;
2127
2128         if (dcb_test) {
2129                 dcb_test = 0;
2130                 dcb_config = 0;
2131         }
2132
2133         if (port_id_is_invalid(pid, ENABLED_WARN))
2134                 return;
2135
2136         printf("Stopping ports...\n");
2137
2138         RTE_ETH_FOREACH_DEV(pi) {
2139                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2140                         continue;
2141
2142                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2143                         printf("Please remove port %d from forwarding configuration.\n", pi);
2144                         continue;
2145                 }
2146
2147                 if (port_is_bonding_slave(pi)) {
2148                         printf("Please remove port %d from bonded device.\n", pi);
2149                         continue;
2150                 }
2151
2152                 port = &ports[pi];
2153                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2154                                                 RTE_PORT_HANDLING) == 0)
2155                         continue;
2156
2157                 rte_eth_dev_stop(pi);
2158
2159                 if (rte_atomic16_cmpset(&(port->port_status),
2160                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2161                         printf("Port %d cannot be set to stopped\n", pi);
2162                 need_check_link_status = 1;
2163         }
2164         if (need_check_link_status && !no_link_check)
2165                 check_all_ports_link_status(RTE_PORT_ALL);
2166
2167         printf("Done\n");
2168 }
2169
2170 static void
2171 remove_invalid_ports_in(portid_t *array, portid_t *total)
2172 {
2173         portid_t i;
2174         portid_t new_total = 0;
2175
2176         for (i = 0; i < *total; i++)
2177                 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2178                         array[new_total] = array[i];
2179                         new_total++;
2180                 }
2181         *total = new_total;
2182 }
2183
2184 static void
2185 remove_invalid_ports(void)
2186 {
2187         remove_invalid_ports_in(ports_ids, &nb_ports);
2188         remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2189         nb_cfg_ports = nb_fwd_ports;
2190 }
2191
2192 void
2193 close_port(portid_t pid)
2194 {
2195         portid_t pi;
2196         struct rte_port *port;
2197
2198         if (port_id_is_invalid(pid, ENABLED_WARN))
2199                 return;
2200
2201         printf("Closing ports...\n");
2202
2203         RTE_ETH_FOREACH_DEV(pi) {
2204                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2205                         continue;
2206
2207                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2208                         printf("Please remove port %d from forwarding configuration.\n", pi);
2209                         continue;
2210                 }
2211
2212                 if (port_is_bonding_slave(pi)) {
2213                         printf("Please remove port %d from bonded device.\n", pi);
2214                         continue;
2215                 }
2216
2217                 port = &ports[pi];
2218                 if (rte_atomic16_cmpset(&(port->port_status),
2219                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2220                         printf("Port %d is already closed\n", pi);
2221                         continue;
2222                 }
2223
2224                 if (rte_atomic16_cmpset(&(port->port_status),
2225                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2226                         printf("Port %d is not stopped, skipping it\n", pi);
2227                         continue;
2228                 }
2229
2230                 if (port->flow_list)
2231                         port_flow_flush(pi);
2232                 rte_eth_dev_close(pi);
2233
2234                 remove_invalid_ports();
2235
2236                 if (rte_atomic16_cmpset(&(port->port_status),
2237                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2238                         printf("Port %d cannot be set to closed\n", pi);
2239         }
2240
2241         printf("Done\n");
2242 }
2243
2244 void
2245 reset_port(portid_t pid)
2246 {
2247         int diag;
2248         portid_t pi;
2249         struct rte_port *port;
2250
2251         if (port_id_is_invalid(pid, ENABLED_WARN))
2252                 return;
2253
2254         printf("Resetting ports...\n");
2255
2256         RTE_ETH_FOREACH_DEV(pi) {
2257                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2258                         continue;
2259
2260                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2261                         printf("Please remove port %d from forwarding "
2262                                "configuration.\n", pi);
2263                         continue;
2264                 }
2265
2266                 if (port_is_bonding_slave(pi)) {
2267                         printf("Please remove port %d from bonded device.\n",
2268                                pi);
2269                         continue;
2270                 }
2271
2272                 diag = rte_eth_dev_reset(pi);
2273                 if (diag == 0) {
2274                         port = &ports[pi];
2275                         port->need_reconfig = 1;
2276                         port->need_reconfig_queues = 1;
2277                 } else {
2278                         printf("Failed to reset port %d. diag=%d\n", pi, diag);
2279                 }
2280         }
2281
2282         printf("Done\n");
2283 }
2284
2285 void
2286 attach_port(char *identifier)
2287 {
2288         portid_t pi = 0;
2289         struct rte_dev_iterator iterator;
2290
2291         printf("Attaching a new port...\n");
2292
2293         if (identifier == NULL) {
2294                 printf("Invalid parameters are specified\n");
2295                 return;
2296         }
2297
2298         if (rte_dev_probe(identifier) != 0) {
2299                 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2300                 return;
2301         }
2302
2303         RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2304                 if (port_is_forwarding(pi))
2305                         continue; /* port was already attached before */
2306                 setup_attached_port(pi);
2307         }
2308 }
2309
2310 static void
2311 setup_attached_port(portid_t pi)
2312 {
2313         unsigned int socket_id;
2314
2315         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2316         /* if socket_id is invalid, set to the first available socket. */
2317         if (check_socket_id(socket_id) < 0)
2318                 socket_id = socket_ids[0];
2319         reconfig(pi, socket_id);
2320         rte_eth_promiscuous_enable(pi);
2321
2322         ports_ids[nb_ports++] = pi;
2323         fwd_ports_ids[nb_fwd_ports++] = pi;
2324         nb_cfg_ports = nb_fwd_ports;
2325         ports[pi].port_status = RTE_PORT_STOPPED;
2326
2327         printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2328         printf("Done\n");
2329 }
2330
2331 void
2332 detach_port_device(portid_t port_id)
2333 {
2334         struct rte_device *dev;
2335         portid_t sibling;
2336
2337         printf("Removing a device...\n");
2338
2339         dev = rte_eth_devices[port_id].device;
2340         if (dev == NULL) {
2341                 printf("Device already removed\n");
2342                 return;
2343         }
2344
2345         if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2346                 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2347                         printf("Port not stopped\n");
2348                         return;
2349                 }
2350                 printf("Port was not closed\n");
2351                 if (ports[port_id].flow_list)
2352                         port_flow_flush(port_id);
2353         }
2354
2355         if (rte_dev_remove(dev) != 0) {
2356                 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2357                 return;
2358         }
2359
2360         for (sibling = 0; sibling < RTE_MAX_ETHPORTS; sibling++) {
2361                 if (rte_eth_devices[sibling].device != dev)
2362                         continue;
2363                 /* reset mapping between old ports and removed device */
2364                 rte_eth_devices[sibling].device = NULL;
2365                 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2366                         /* sibling ports are forced to be closed */
2367                         ports[sibling].port_status = RTE_PORT_CLOSED;
2368                         printf("Port %u is closed\n", sibling);
2369                 }
2370         }
2371
2372         remove_invalid_ports();
2373
2374         printf("Device of port %u is detached\n", port_id);
2375         printf("Now total ports is %d\n", nb_ports);
2376         printf("Done\n");
2377         return;
2378 }
2379
2380 void
2381 pmd_test_exit(void)
2382 {
2383         struct rte_device *device;
2384         portid_t pt_id;
2385         int ret;
2386
2387         if (test_done == 0)
2388                 stop_packet_forwarding();
2389
2390         if (ports != NULL) {
2391                 no_link_check = 1;
2392                 RTE_ETH_FOREACH_DEV(pt_id) {
2393                         printf("\nShutting down port %d...\n", pt_id);
2394                         fflush(stdout);
2395                         stop_port(pt_id);
2396                         close_port(pt_id);
2397
2398                         /*
2399                          * This is a workaround to fix a virtio-user issue that
2400                          * requires to call clean-up routine to remove existing
2401                          * socket.
2402                          * This workaround is valid only for testpmd; a fix
2403                          * valid for all applications is still needed.
2404                          * TODO: Implement proper resource cleanup
2405                          */
2406                         device = rte_eth_devices[pt_id].device;
2407                         if (device && !strcmp(device->driver->name, "net_virtio_user"))
2408                                 detach_port_device(pt_id);
2409                 }
2410         }
2411
2412         if (hot_plug) {
2413                 ret = rte_dev_event_monitor_stop();
2414                 if (ret) {
2415                         RTE_LOG(ERR, EAL,
2416                                 "failed to stop device event monitor.\n");
2417                         return;
2418                 }
2419
2420                 ret = rte_dev_event_callback_unregister(NULL,
2421                         eth_dev_event_callback, NULL);
2422                 if (ret < 0) {
2423                         RTE_LOG(ERR, EAL,
2424                                 "failed to unregister device event callback.\n");
2425                         return;
2426                 }
2427
2428                 ret = rte_dev_hotplug_handle_disable();
2429                 if (ret) {
2430                         RTE_LOG(ERR, EAL,
2431                                 "failed to disable hotplug handling.\n");
2432                         return;
2433                 }
2434         }
2435
2436         printf("\nBye...\n");
2437 }
2438
2439 typedef void (*cmd_func_t)(void);
2440 struct pmd_test_command {
2441         const char *cmd_name;
2442         cmd_func_t cmd_func;
2443 };
2444
2445 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2446
2447 /* Check the link status of all ports for up to 9 s, and finally print it */
2448 static void
2449 check_all_ports_link_status(uint32_t port_mask)
2450 {
2451 #define CHECK_INTERVAL 100 /* 100ms */
2452 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2453         portid_t portid;
2454         uint8_t count, all_ports_up, print_flag = 0;
2455         struct rte_eth_link link;
2456
2457         printf("Checking link statuses...\n");
2458         fflush(stdout);
2459         for (count = 0; count <= MAX_CHECK_TIME; count++) {
2460                 all_ports_up = 1;
2461                 RTE_ETH_FOREACH_DEV(portid) {
2462                         if ((port_mask & (1 << portid)) == 0)
2463                                 continue;
2464                         memset(&link, 0, sizeof(link));
2465                         rte_eth_link_get_nowait(portid, &link);
2466                         /* print link status if flag set */
2467                         if (print_flag == 1) {
2468                                 if (link.link_status)
2469                                         printf(
2470                                         "Port %d Link Up. speed %u Mbps - %s\n",
2471                                         portid, link.link_speed,
2472                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2473                                         ("full-duplex") : ("half-duplex"));
2474                                 else
2475                                         printf("Port %d Link Down\n", portid);
2476                                 continue;
2477                         }
2478                         /* clear all_ports_up flag if any link down */
2479                         if (link.link_status == ETH_LINK_DOWN) {
2480                                 all_ports_up = 0;
2481                                 break;
2482                         }
2483                 }
2484                 /* after finally printing all link status, get out */
2485                 if (print_flag == 1)
2486                         break;
2487
2488                 if (all_ports_up == 0) {
2489                         fflush(stdout);
2490                         rte_delay_ms(CHECK_INTERVAL);
2491                 }
2492
2493                 /* set the print_flag if all ports up or timeout */
2494                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2495                         print_flag = 1;
2496                 }
2497
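                     /*
                      * With link-state-change interrupts enabled, a single
                      * poll is enough; further status changes are reported
                      * asynchronously.
                      */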
2498                 if (lsc_interrupt)
2499                         break;
2500         }
2501 }
2502
2503 static void
2504 rmv_event_callback(void *arg)
2505 {
2506         int need_to_start = 0;
2507         int org_no_link_check = no_link_check;
2508         portid_t port_id = (intptr_t)arg;
2509
2510         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2511
2512         if (!test_done && port_is_forwarding(port_id)) {
2513                 need_to_start = 1;
2514                 stop_packet_forwarding();
2515         }
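             /*
              * Skip the link-status poll while stopping a port that is being
              * removed, then restore the user's no_link_check setting.
              */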
2516         no_link_check = 1;
2517         stop_port(port_id);
2518         no_link_check = org_no_link_check;
2519         close_port(port_id);
2520         detach_port_device(port_id);
2521         if (need_to_start)
2522                 start_packet_forwarding(0);
2523 }
2524
2525 /* This function is used by the interrupt thread */
2526 static int
2527 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2528                   void *ret_param)
2529 {
2530         static const char * const event_desc[] = {
2531                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2532                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2533                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2534                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2535                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2536                 [RTE_ETH_EVENT_IPSEC] = "IPsec",
2537                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2538                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2539                 [RTE_ETH_EVENT_NEW] = "device probed",
2540                 [RTE_ETH_EVENT_DESTROY] = "device released",
2541                 [RTE_ETH_EVENT_MAX] = NULL,
2542         };
2543
2544         RTE_SET_USED(param);
2545         RTE_SET_USED(ret_param);
2546
2547         if (type >= RTE_ETH_EVENT_MAX) {
2548                 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2549                         port_id, __func__, type);
2550                 fflush(stderr);
2551         } else if (event_print_mask & (UINT32_C(1) << type)) {
2552                 printf("\nPort %" PRIu16 ": %s event\n", port_id,
2553                         event_desc[type]);
2554                 fflush(stdout);
2555         }
2556
2557         if (port_id_is_invalid(port_id, DISABLED_WARN))
2558                 return 0;
2559
2560         switch (type) {
2561         case RTE_ETH_EVENT_INTR_RMV:
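                     /*
                      * Defer the actual removal by 100 ms via an EAL alarm;
                      * detaching the port directly from the interrupt thread
                      * delivering this event is presumably not safe.
                      */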
2562                 if (rte_eal_alarm_set(100000,
2563                                 rmv_event_callback, (void *)(intptr_t)port_id))
2564                         fprintf(stderr, "Could not set up deferred device removal\n");
2565                 break;
2566         default:
2567                 break;
2568         }
2569         return 0;
2570 }
2571
2572 /* This function is used by the interrupt thread */
2573 static void
2574 eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2575                              __rte_unused void *arg)
2576 {
2577         uint16_t port_id;
2578         int ret;
2579
2580         if (type >= RTE_DEV_EVENT_MAX) {
2581                 fprintf(stderr, "%s called upon invalid event %d\n",
2582                         __func__, type);
2583                 fflush(stderr);
2584         }
2585
2586         switch (type) {
2587         case RTE_DEV_EVENT_REMOVE:
2588                 RTE_LOG(ERR, EAL, "The device %s has been removed!\n",
2589                         device_name);
2590                 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
2591                 if (ret) {
2592                         RTE_LOG(ERR, EAL, "cannot get the port for device %s!\n",
2593                                 device_name);
2594                         return;
2595                 }
2596                 rmv_event_callback((void *)(intptr_t)port_id);
2597                 break;
2598         case RTE_DEV_EVENT_ADD:
2599                 RTE_LOG(ERR, EAL, "The device %s has been added!\n",
2600                         device_name);
2601                 /* TODO: After finish kernel driver binding,
2602                  * begin to attach port.
2603                  */
2604                 break;
2605         default:
2606                 break;
2607         }
2608 }
2609
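     /*
      * Apply the user-supplied Tx queue to stats-counter mappings to the
      * given port. Returns 0 on success (marking the port accordingly) or the
      * error code from rte_eth_dev_set_tx_queue_stats_mapping(), e.g. -ENOTSUP.
      */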
2610 static int
2611 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2612 {
2613         uint16_t i;
2614         int diag;
2615         uint8_t mapping_found = 0;
2616
2617         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2618                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2619                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2620                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2621                                         tx_queue_stats_mappings[i].queue_id,
2622                                         tx_queue_stats_mappings[i].stats_counter_id);
2623                         if (diag != 0)
2624                                 return diag;
2625                         mapping_found = 1;
2626                 }
2627         }
2628         if (mapping_found)
2629                 port->tx_queue_stats_mapping_enabled = 1;
2630         return 0;
2631 }
2632
2633 static int
2634 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2635 {
2636         uint16_t i;
2637         int diag;
2638         uint8_t mapping_found = 0;
2639
2640         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2641                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2642                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2643                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2644                                         rx_queue_stats_mappings[i].queue_id,
2645                                         rx_queue_stats_mappings[i].stats_counter_id);
2646                         if (diag != 0)
2647                                 return diag;
2648                         mapping_found = 1;
2649                 }
2650         }
2651         if (mapping_found)
2652                 port->rx_queue_stats_mapping_enabled = 1;
2653         return 0;
2654 }
2655
2656 static void
2657 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2658 {
2659         int diag = 0;
2660
2661         diag = set_tx_queue_stats_mapping_registers(pi, port);
2662         if (diag != 0) {
2663                 if (diag == -ENOTSUP) {
2664                         port->tx_queue_stats_mapping_enabled = 0;
2665                         printf("TX queue stats mapping not supported port id=%d\n", pi);
2666                 }
2667                 else
2668                         rte_exit(EXIT_FAILURE,
2669                                         "set_tx_queue_stats_mapping_registers "
2670                                         "failed for port id=%d diag=%d\n",
2671                                         pi, diag);
2672         }
2673
2674         diag = set_rx_queue_stats_mapping_registers(pi, port);
2675         if (diag != 0) {
2676                 if (diag == -ENOTSUP) {
2677                         port->rx_queue_stats_mapping_enabled = 0;
2678                         printf("RX queue stats mapping not supported port id=%d\n", pi);
2679                 }
2680                 else
2681                         rte_exit(EXIT_FAILURE,
2682                                         "set_rx_queue_stats_mapping_registers "
2683                                         "failed for port id=%d diag=%d\n",
2684                                         pi, diag);
2685         }
2686 }
2687
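     /*
      * Initialize the per-queue Rx/Tx configuration of a port from the device
      * defaults reported by the PMD, then apply any thresholds supplied on
      * the command line (RTE_PMD_PARAM_UNSET marks values that were not given).
      */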
2688 static void
2689 rxtx_port_config(struct rte_port *port)
2690 {
2691         uint16_t qid;
2692
2693         for (qid = 0; qid < nb_rxq; qid++) {
2694                 port->rx_conf[qid] = port->dev_info.default_rxconf;
2695
2696                 /* Check if any Rx parameters have been passed */
2697                 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2698                         port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2699
2700                 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2701                         port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2702
2703                 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2704                         port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2705
2706                 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2707                         port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2708
2709                 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2710                         port->rx_conf[qid].rx_drop_en = rx_drop_en;
2711
2712                 port->nb_rx_desc[qid] = nb_rxd;
2713         }
2714
2715         for (qid = 0; qid < nb_txq; qid++) {
2716                 port->tx_conf[qid] = port->dev_info.default_txconf;
2717
2718                 /* Check if any Tx parameters have been passed */
2719                 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2720                         port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2721
2722                 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2723                         port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2724
2725                 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2726                         port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2727
2728                 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2729                         port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2730
2731                 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2732                         port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2733
2734                 port->nb_tx_desc[qid] = nb_txd;
2735         }
2736 }
2737
2738 void
2739 init_port_config(void)
2740 {
2741         portid_t pid;
2742         struct rte_port *port;
2743
2744         RTE_ETH_FOREACH_DEV(pid) {
2745                 port = &ports[pid];
2746                 port->dev_conf.fdir_conf = fdir_conf;
2747                 rte_eth_dev_info_get(pid, &port->dev_info);
2748                 if (nb_rxq > 1) {
2749                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2750                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2751                                 rss_hf & port->dev_info.flow_type_rss_offloads;
2752                 } else {
2753                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2754                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2755                 }
2756
2757                 if (port->dcb_flag == 0) {
2758                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2759                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2760                         else
2761                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2762                 }
2763
2764                 rxtx_port_config(port);
2765
2766                 rte_eth_macaddr_get(pid, &port->eth_addr);
2767
2768                 map_port_queue_stats_mapping_registers(pid, port);
2769 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2770                 rte_pmd_ixgbe_bypass_init(pid);
2771 #endif
2772
2773                 if (lsc_interrupt &&
2774                     (rte_eth_devices[pid].data->dev_flags &
2775                      RTE_ETH_DEV_INTR_LSC))
2776                         port->dev_conf.intr_conf.lsc = 1;
2777                 if (rmv_interrupt &&
2778                     (rte_eth_devices[pid].data->dev_flags &
2779                      RTE_ETH_DEV_INTR_RMV))
2780                         port->dev_conf.intr_conf.rmv = 1;
2781         }
2782 }
2783
2784 void set_port_slave_flag(portid_t slave_pid)
2785 {
2786         struct rte_port *port;
2787
2788         port = &ports[slave_pid];
2789         port->slave_flag = 1;
2790 }
2791
2792 void clear_port_slave_flag(portid_t slave_pid)
2793 {
2794         struct rte_port *port;
2795
2796         port = &ports[slave_pid];
2797         port->slave_flag = 0;
2798 }
2799
2800 uint8_t port_is_bonding_slave(portid_t slave_pid)
2801 {
2802         struct rte_port *port;
2803
2804         port = &ports[slave_pid];
2805         if ((rte_eth_devices[slave_pid].data->dev_flags &
2806             RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2807                 return 1;
2808         return 0;
2809 }
2810
2811 const uint16_t vlan_tags[] = {
2812                 0,  1,  2,  3,  4,  5,  6,  7,
2813                 8,  9, 10, 11,  12, 13, 14, 15,
2814                 16, 17, 18, 19, 20, 21, 22, 23,
2815                 24, 25, 26, 27, 28, 29, 30, 31
2816 };
2817
2818 static int
2819 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
2820                  enum dcb_mode_enable dcb_mode,
2821                  enum rte_eth_nb_tcs num_tcs,
2822                  uint8_t pfc_en)
2823 {
2824         uint8_t i;
2825         int32_t rc;
2826         struct rte_eth_rss_conf rss_conf;
2827
2828         /*
2829          * Builds up the correct configuration for DCB+VT based on the VLAN tags array
2830          * given above, and the number of traffic classes available for use.
2831          */
2832         if (dcb_mode == DCB_VT_ENABLED) {
2833                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2834                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2835                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2836                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2837
2838                 /* VMDQ+DCB RX and TX configurations */
2839                 vmdq_rx_conf->enable_default_pool = 0;
2840                 vmdq_rx_conf->default_pool = 0;
2841                 vmdq_rx_conf->nb_queue_pools =
2842                         (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2843                 vmdq_tx_conf->nb_queue_pools =
2844                         (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2845
2846                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
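                     /*
                      * Map each VLAN tag to exactly one pool, distributing the
                      * tags round-robin over the available pools.
                      */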
2847                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2848                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2849                         vmdq_rx_conf->pool_map[i].pools =
2850                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
2851                 }
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}

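/*
 * Apply a DCB configuration to a stopped port: build the rte_eth_conf via
 * get_eth_dcb_conf(), reconfigure the device, resize the queue counts to
 * match the DCB layout and enable VLAN filtering for the vlan_tags above.
 */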
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	port_conf.rxmode = rte_port->dev_conf.rxmode;
	port_conf.txmode = rte_port->dev_conf.txmode;

	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;

	/* Re-configure the device. */
	retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
	if (retval < 0)
		return retval;

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue ids of the VMDq pools start after the PF queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.\n", pid);
		return -1;
	}

	/* Assume all ports in testpmd have the same DCB capability
	 * and the same number of RX and TX queues in DCB mode.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* If VT is disabled, use all PF queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
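	/* Hand RX descriptors back to the NIC in batches of 64 rather
	 * than one at a time.
	 */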
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
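
/*
 * For reference, this is typically reached from the testpmd command line
 * (assuming the usual CLI syntax), e.g.:
 *
 *     testpmd> port stop 0
 *     testpmd> port config 0 dcb vt on 4 pfc on
 *     testpmd> port start 0
 *
 * which maps to init_port_dcb_config(0, DCB_VT_ENABLED, ETH_4_TCS, 1).
 */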

static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}

	/* Initialize ports NUMA structures */
	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
}

static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}

static void
print_stats(void)
{
	uint8_t i;
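	/* VT100 escape sequences: ESC[2J clears the screen and
	 * ESC[1;1H moves the cursor to row 1, column 1 (27 == ESC).
	 */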
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);
}

static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate forced termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}

int
main(int argc, char **argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_panic("Cannot register log type\n");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_BSDAPP
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

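	/* rte_eal_init() returned the number of arguments it consumed;
	 * skip them so only application arguments are parsed below.
	 */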
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				"using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either RX or TX queue count should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent fully testing it.\n",
		       nb_rxq, nb_txq);

	init_config();

	if (hot_plug) {
		ret = rte_dev_hotplug_handle_enable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to enable hotplug handling\n");
			return -1;
		}

		ret = rte_dev_event_monitor_start();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to start device event monitoring\n");
			return -1;
		}

		ret = rte_dev_event_callback_register(NULL,
			eth_dev_event_callback, NULL);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to register device event callback\n");
			return -1;
		}
	}

	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());
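	/* The latency and bitrate collectors below publish their values
	 * through the metrics library initialized above.
	 */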

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, starting packet forwarding\n");
		start_packet_forwarding(tx_first);
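		/* With --stats-period in non-interactive mode, redraw the
		 * port statistics every stats_period seconds until a signal
		 * sets f_quit.
		 */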
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}
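
/*
 * Typical non-interactive invocation (assuming a standard DPDK setup):
 *
 *     ./testpmd -l 0-3 -n 4 -- --rxq=2 --txq=2 --stats-period=1
 *
 * EAL arguments come before the "--" separator and are consumed by
 * rte_eal_init(); everything after it goes to launch_args_parse().
 * Add "-i" after the separator to get the interactive prompt instead.
 */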