app/testpmd: collect bad outer L4 checksum for csum engine
[dpdk.git] app/test-pmd/testpmd.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_malloc_heap.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use master core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
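/* Selected at startup via testpmd's --mp-alloc command-line option. */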

/*
 * Store the specified sockets on which the memory pool used by each port
 * is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX ring used by each port
 * is allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX ring used by each port
 * is allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet addresses of the peer target ports to which packets
 * are forwarded.
 * Must be instantiated with the Ethernet addresses of the peer traffic
 * generator ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;        /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
        &io_fwd_engine,
        &mac_fwd_engine,
        &mac_swap_engine,
        &flow_gen_engine,
        &rx_only_engine,
        &tx_only_engine,
        &csum_fwd_engine,
        &icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
        &softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
        &ieee1588_fwd_engine,
#endif
        NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * When running in a container, the process started with the 'stats-period'
 * option cannot be terminated directly. Set this flag to exit the stats
 * period loop after a SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
        TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* whether the current configuration is in DCB mode; 0 means it is not */
uint8_t dcb_config = 0;

/* whether DCB is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/*
 * Display or mask Ethernet events.
 * Default to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
                            (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
                            (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
                            (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
                            (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
                            (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
                            (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);

/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
        .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
};

struct rte_eth_txmode tx_mode = {
        .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
        .mode = RTE_FDIR_MODE_NONE,
        .pballoc = RTE_FDIR_PBALLOC_64K,
        .status = RTE_FDIR_REPORT_STATUS,
        .mask = {
                .vlan_tci_mask = 0xFFEF,
                .ipv4_mask     = {
                        .src_ip = 0xFFFFFFFF,
                        .dst_ip = 0xFFFFFFFF,
                },
                .ipv6_mask     = {
                        .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
                        .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
                },
                .src_port_mask = 0xFFFF,
                .dst_port_mask = 0xFFFF,
                .mac_addr_byte_mask = 0xFF,
                .tunnel_type_mask = 1,
                .tunnel_id_mask = 0xFFFFFFFF,
        },
        .drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

struct vxlan_encap_conf vxlan_encap_conf = {
        .select_ipv4 = 1,
        .select_vlan = 0,
        .vni = "\x00\x00\x00",
        .udp_src = 0,
        .udp_dst = RTE_BE16(4789),
        .ipv4_src = IPv4(127, 0, 0, 1),
        .ipv4_dst = IPv4(255, 255, 255, 255),
        .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
                "\x00\x00\x00\x00\x00\x00\x00\x01",
        .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
                "\x00\x00\x00\x00\x00\x00\x11\x11",
        .vlan_tci = 0,
        .eth_src = "\x00\x00\x00\x00\x00\x00",
        .eth_dst = "\xff\xff\xff\xff\xff\xff",
};

struct nvgre_encap_conf nvgre_encap_conf = {
        .select_ipv4 = 1,
        .select_vlan = 0,
        .tni = "\x00\x00\x00",
        .ipv4_src = IPv4(127, 0, 0, 1),
        .ipv4_dst = IPv4(255, 255, 255, 255),
        .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
                "\x00\x00\x00\x00\x00\x00\x00\x01",
        .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
                "\x00\x00\x00\x00\x00\x00\x11\x11",
        .vlan_tci = 0,
        .eth_src = "\x00\x00\x00\x00\x00\x00",
        .eth_dst = "\xff\xff\xff\xff\xff\xff",
};

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
                                                   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
                              enum rte_eth_event_type type,
                              void *param, void *ret_param);
static void eth_dev_event_callback(char *device_name,
                                enum rte_dev_event_type type,
                                void *param);
static int eth_dev_event_callback_register(void);
static int eth_dev_event_callback_unregister(void);


/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check whether a socket is newly discovered.
 * Return a positive value if the socket id is new, zero if it has
 * already been discovered.
 */
int
new_socket_id(unsigned int socket_id)
{
        unsigned int i;

        for (i = 0; i < num_sockets; i++) {
                if (socket_ids[i] == socket_id)
                        return 0;
        }
        return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
        unsigned int i;
        unsigned int nb_lc;
        unsigned int sock_num;

        nb_lc = 0;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (!rte_lcore_is_enabled(i))
                        continue;
                sock_num = rte_lcore_to_socket_id(i);
                if (new_socket_id(sock_num)) {
                        if (num_sockets >= RTE_MAX_NUMA_NODES) {
                                rte_exit(EXIT_FAILURE,
                                         "Total sockets greater than %u\n",
                                         RTE_MAX_NUMA_NODES);
                        }
                        socket_ids[num_sockets++] = sock_num;
                }
                if (i == rte_get_master_lcore())
                        continue;
                fwd_lcores_cpuids[nb_lc++] = i;
        }
        nb_lcores = (lcoreid_t) nb_lc;
        nb_cfg_lcores = nb_lcores;
        nb_fwd_lcores = 1;
}

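/*
 * Set the default peer Ethernet addresses: a locally administered
 * address whose last byte encodes the peer port index.
 */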
static void
set_def_peer_eth_addrs(void)
{
        portid_t i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
                peer_eth_addrs[i].addr_bytes[5] = i;
        }
}

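/*
 * By default, use every probed port as a forwarding port and mark
 * them all as configured.
 */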
static void
set_default_fwd_ports_config(void)
{
        portid_t pt_id;
        int i = 0;

        RTE_ETH_FOREACH_DEV(pt_id)
                fwd_ports_ids[i++] = pt_id;

        nb_cfg_ports = nb_ports;
        nb_fwd_ports = nb_ports;
}

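/*
 * Build the default forwarding configuration: lcores, peer Ethernet
 * addresses and forwarding ports.
 */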
void
set_def_fwd_config(void)
{
        set_default_fwd_lcores_config();
        set_def_peer_eth_addrs();
        set_default_fwd_ports_config();
}

/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
        unsigned int n_pages, mbuf_per_pg, leftover;
        uint64_t total_mem, mbuf_mem, obj_sz;

        /* there is no good way to predict how much space the mempool will
         * occupy because it will allocate chunks on the fly, and some of those
         * will come from default DPDK memory while some will come from our
         * external memory, so just assume 128MB will be enough for everyone.
         */
        uint64_t hdr_mem = 128 << 20;

        /* account for possible non-contiguousness */
        obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
        if (obj_sz > pgsz) {
                TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
                return -1;
        }

        mbuf_per_pg = pgsz / obj_sz;
        leftover = (nb_mbufs % mbuf_per_pg) > 0;
        n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

        mbuf_mem = n_pages * pgsz;

        total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

        if (total_mem > SIZE_MAX) {
                TESTPMD_LOG(ERR, "Memory size too big\n");
                return -1;
        }
        *out = (size_t)total_mem;

        return 0;
}

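/*
 * Return the index of the least significant set bit of a 64-bit value
 * (undefined for zero, as per __builtin_ctzll).
 */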
static inline uint32_t
bsf64(uint64_t v)
{
        return (uint32_t)__builtin_ctzll(v);
}

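/* Return ceil(log2(v)); zero is mapped to zero. */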
static inline uint32_t
log2_u64(uint64_t v)
{
        if (v == 0)
                return 0;
        v = rte_align64pow2(v);
        return bsf64(v);
}

static int
pagesz_flags(uint64_t page_sz)
{
        /* as per the mmap() manpage, the huge page size flag is the log2
         * of the page size shifted by MAP_HUGE_SHIFT
         */
        int log2 = log2_u64(page_sz);

        return (log2 << HUGE_SHIFT);
}

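/*
 * Reserve an anonymous private mapping of 'memsz' bytes, optionally
 * backed by huge pages of the requested size. Return NULL on failure.
 */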
static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
        void *addr;
        int flags;

        /* allocate anonymous hugepages */
        flags = MAP_ANONYMOUS | MAP_PRIVATE;
        if (huge)
                flags |= HUGE_FLAG | pagesz_flags(pgsz);

        addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
        if (addr == MAP_FAILED)
                return NULL;

        return addr;
}

struct extmem_param {
        void *addr;
        size_t len;
        size_t pgsz;
        rte_iova_t *iova_table;
        unsigned int iova_table_len;
};

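/*
 * Try to reserve an externally allocated memory area and build its IOVA
 * table. Candidate page sizes are tried in order until one succeeds; the
 * resulting mapping and per-page IOVA addresses are returned in 'param'.
 */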
static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
                bool huge)
{
        uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
                        RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
        unsigned int cur_page, n_pages, pgsz_idx;
        size_t mem_sz, cur_pgsz;
        rte_iova_t *iovas = NULL;
        void *addr;
        int ret;

        for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
                /* skip anything that is too big */
                if (pgsizes[pgsz_idx] > SIZE_MAX)
                        continue;

                cur_pgsz = pgsizes[pgsz_idx];

                /* if we were told not to allocate hugepages, override */
                if (!huge)
                        cur_pgsz = sysconf(_SC_PAGESIZE);

                ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
                if (ret < 0) {
                        TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
                        return -1;
                }

                /* allocate our memory */
                addr = alloc_mem(mem_sz, cur_pgsz, huge);

                /* if we couldn't allocate memory with a specified page size,
                 * that doesn't mean we can't do it with other page sizes, so
                 * try another one.
                 */
                if (addr == NULL)
                        continue;

                /* store IOVA addresses for every page in this memory area */
                n_pages = mem_sz / cur_pgsz;

                iovas = malloc(sizeof(*iovas) * n_pages);

                if (iovas == NULL) {
                        TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
                        goto fail;
                }
                /* lock memory if it's not huge pages */
                if (!huge)
                        mlock(addr, mem_sz);

                /* populate IOVA addresses */
                for (cur_page = 0; cur_page < n_pages; cur_page++) {
                        rte_iova_t iova;
                        size_t offset;
                        void *cur;

                        offset = cur_pgsz * cur_page;
                        cur = RTE_PTR_ADD(addr, offset);

                        /* touch the page before getting its IOVA */
                        *(volatile char *)cur = 0;

                        iova = rte_mem_virt2iova(cur);

                        iovas[cur_page] = iova;
                }

                break;
        }
        /* if we couldn't allocate anything */
        if (iovas == NULL)
                return -1;

        param->addr = addr;
        param->len = mem_sz;
        param->pgsz = cur_pgsz;
        param->iova_table = iovas;
        param->iova_table_len = n_pages;

        return 0;
fail:
        if (iovas)
                free(iovas);
        if (addr)
                munmap(addr, mem_sz);

        return -1;
}

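/*
 * Create the "extmem" heap if it does not exist yet, reserve the external
 * memory area and add it to the heap. With VFIO, DMA mapping is then done
 * automatically by EAL.
 */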
static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
        struct extmem_param param;
        int socket_id, ret;

        memset(&param, 0, sizeof(param));

        /* check if our heap exists */
        socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
        if (socket_id < 0) {
                /* create our heap */
                ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
                if (ret < 0) {
                        TESTPMD_LOG(ERR, "Cannot create heap\n");
                        return -1;
                }
        }

        ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
        if (ret < 0) {
                TESTPMD_LOG(ERR, "Cannot create memory area\n");
                return -1;
        }

        /* we now have a valid memory area, so add it to heap */
        ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
                        param.addr, param.len, param.iova_table,
                        param.iova_table_len, param.pgsz);

        /* when using VFIO, memory is automatically mapped for DMA by EAL */

        /* not needed any more */
        free(param.iova_table);

        if (ret < 0) {
                TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
                munmap(param.addr, param.len);
                return -1;
        }

        /* success */

        TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
                        param.len >> 20);

        return 0;
}

/*
 * Mbuf pool creation for a given socket; done once at configuration
 * initialisation time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
                 unsigned int socket_id)
{
        char pool_name[RTE_MEMPOOL_NAMESIZE];
        struct rte_mempool *rte_mp = NULL;
        uint32_t mb_size;

        mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
        mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

        TESTPMD_LOG(INFO,
                "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
                pool_name, nb_mbuf, mbuf_seg_size, socket_id);

        switch (mp_alloc_type) {
        case MP_ALLOC_NATIVE:
                {
                        /* wrapper to rte_mempool_create() */
                        TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
                                        rte_mbuf_best_mempool_ops());
                        rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
                                mb_mempool_cache, 0, mbuf_seg_size, socket_id);
                        break;
                }
        case MP_ALLOC_ANON:
                {
                        rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
                                mb_size, (unsigned int) mb_mempool_cache,
                                sizeof(struct rte_pktmbuf_pool_private),
                                socket_id, 0);
                        if (rte_mp == NULL)
                                goto err;

                        if (rte_mempool_populate_anon(rte_mp) == 0) {
                                rte_mempool_free(rte_mp);
                                rte_mp = NULL;
                                goto err;
                        }
                        rte_pktmbuf_pool_init(rte_mp, NULL);
                        rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
                        break;
                }
        case MP_ALLOC_XMEM:
        case MP_ALLOC_XMEM_HUGE:
                {
                        int heap_socket;
                        bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

                        if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
                                rte_exit(EXIT_FAILURE, "Could not create external memory\n");

                        heap_socket =
                                rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
                        if (heap_socket < 0)
                                rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

                        TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
                                        rte_mbuf_best_mempool_ops());
                        rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
                                        mb_mempool_cache, 0, mbuf_seg_size,
                                        heap_socket);
                        break;
                }
        default:
                {
                        rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
                }
        }

err:
        if (rte_mp == NULL) {
                rte_exit(EXIT_FAILURE,
                        "Creation of mbuf pool for socket %u failed: %s\n",
                        socket_id, rte_strerror(rte_errno));
        } else if (verbose_level > 0) {
                rte_mempool_dump(stdout, rte_mp);
        }
}

/*
 * Check whether the given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
        static int warning_once = 0;

        if (new_socket_id(socket_id)) {
                if (!warning_once && numa_support)
                        printf("Warning: NUMA should be configured manually by"
                               " using --port-numa-config and"
                               " --ring-numa-config parameters along with"
                               " --numa.\n");
                warning_once = 1;
                return -1;
        }
        return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
        queueid_t allowed_max_rxq = MAX_QUEUE_ID;
        portid_t pi;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_FOREACH_DEV(pi) {
                rte_eth_dev_info_get(pi, &dev_info);
                if (dev_info.max_rx_queues < allowed_max_rxq) {
                        allowed_max_rxq = dev_info.max_rx_queues;
                        *pid = pi;
                }
        }
        return allowed_max_rxq;
}

/*
 * Check whether the input rxq is valid.
 * It is valid if it is not greater than the maximum number of RX queues
 * of every port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_rxq(queueid_t rxq)
{
        queueid_t allowed_max_rxq;
        portid_t pid = 0;

        allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
        if (rxq > allowed_max_rxq) {
                printf("Fail: input rxq (%u) can't be greater "
                       "than max_rx_queues (%u) of port %u\n",
                       rxq,
                       allowed_max_rxq,
                       pid);
                return -1;
        }
        return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
        queueid_t allowed_max_txq = MAX_QUEUE_ID;
        portid_t pi;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_FOREACH_DEV(pi) {
                rte_eth_dev_info_get(pi, &dev_info);
                if (dev_info.max_tx_queues < allowed_max_txq) {
                        allowed_max_txq = dev_info.max_tx_queues;
                        *pid = pi;
                }
        }
        return allowed_max_txq;
}

/*
 * Check whether the input txq is valid.
 * It is valid if it is not greater than the maximum number of TX queues
 * of every port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_txq(queueid_t txq)
{
        queueid_t allowed_max_txq;
        portid_t pid = 0;

        allowed_max_txq = get_allowed_max_nb_txq(&pid);
        if (txq > allowed_max_txq) {
                printf("Fail: input txq (%u) can't be greater "
                       "than max_tx_queues (%u) of port %u\n",
                       txq,
                       allowed_max_txq,
                       pid);
                return -1;
        }
        return 0;
}

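/*
 * Initialize the testpmd configuration: per-lcore structures, default
 * port configuration and offloads, mbuf pools (one per NUMA socket when
 * NUMA is enabled), GSO/GRO contexts and forwarding streams.
 */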
static void
init_config(void)
{
        portid_t pid;
        struct rte_port *port;
        struct rte_mempool *mbp;
        unsigned int nb_mbuf_per_pool;
        lcoreid_t  lc_id;
        uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
        struct rte_gro_param gro_param;
        uint32_t gso_types;
        int k;

        memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

        if (numa_support) {
                memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
                memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
                memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
        }

        /* Configuration of logical cores. */
        fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
                                sizeof(struct fwd_lcore *) * nb_lcores,
                                RTE_CACHE_LINE_SIZE);
        if (fwd_lcores == NULL) {
                rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
                                                        "failed\n", nb_lcores);
        }
        for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
                fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
                                               sizeof(struct fwd_lcore),
                                               RTE_CACHE_LINE_SIZE);
                if (fwd_lcores[lc_id] == NULL) {
                        rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
                                                                "failed\n");
                }
                fwd_lcores[lc_id]->cpuid_idx = lc_id;
        }

        RTE_ETH_FOREACH_DEV(pid) {
                port = &ports[pid];
                /* Apply default TxRx configuration for all ports */
                port->dev_conf.txmode = tx_mode;
                port->dev_conf.rxmode = rx_mode;
                rte_eth_dev_info_get(pid, &port->dev_info);

                if (!(port->dev_info.tx_offload_capa &
                      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
                        port->dev_conf.txmode.offloads &=
                                ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
                if (numa_support) {
                        if (port_numa[pid] != NUMA_NO_CONFIG)
                                port_per_socket[port_numa[pid]]++;
                        else {
                                uint32_t socket_id = rte_eth_dev_socket_id(pid);

                                /* if socket_id is invalid, set to 0 */
                                if (check_socket_id(socket_id) < 0)
                                        socket_id = 0;
                                port_per_socket[socket_id]++;
                        }
                }

                /* Apply Rx offloads configuration */
                for (k = 0; k < port->dev_info.max_rx_queues; k++)
                        port->rx_conf[k].offloads =
                                port->dev_conf.rxmode.offloads;
                /* Apply Tx offloads configuration */
                for (k = 0; k < port->dev_info.max_tx_queues; k++)
                        port->tx_conf[k].offloads =
                                port->dev_conf.txmode.offloads;

                /* set flag to initialize port/queue */
                port->need_reconfig = 1;
                port->need_reconfig_queues = 1;
        }

        /*
         * Create pools of mbufs.
         * If NUMA support is disabled, create a single pool of mbufs in
         * socket 0 memory by default.
         * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
         *
         * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
         * nb_txd can be configured at run time.
         */
        if (param_total_num_mbufs)
                nb_mbuf_per_pool = param_total_num_mbufs;
        else {
                nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
                        (nb_lcores * mb_mempool_cache) +
                        RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
                nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
        }

        if (numa_support) {
                uint8_t i;

                for (i = 0; i < num_sockets; i++)
                        mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
                                         socket_ids[i]);
        } else {
                if (socket_num == UMA_NO_CONFIG)
                        mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
                else
                        mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
                                                 socket_num);
        }

        init_port_config();

        gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
        /*
         * Record which mbuf pool each logical core uses, if needed.
         */
        for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
                mbp = mbuf_pool_find(
                        rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

                if (mbp == NULL)
                        mbp = mbuf_pool_find(0);
                fwd_lcores[lc_id]->mbp = mbp;
                /* initialize GSO context */
                fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
                fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
                fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
                fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
                        ETHER_CRC_LEN;
                fwd_lcores[lc_id]->gso_ctx.flag = 0;
        }

        /* Configuration of packet forwarding streams. */
        if (init_fwd_streams() < 0)
                rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

        fwd_config_setup();

        /* create a gro context for each lcore */
        gro_param.gro_types = RTE_GRO_TCP_IPV4;
        gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
        gro_param.max_item_per_flow = MAX_PKT_BURST;
        for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
                gro_param.socket_id = rte_lcore_to_socket_id(
                                fwd_lcores_cpuids[lc_id]);
                fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
                if (fwd_lcores[lc_id]->gro_ctx == NULL) {
                        rte_exit(EXIT_FAILURE,
                                        "rte_gro_ctx_create() failed\n");
                }
        }

#if defined RTE_LIBRTE_PMD_SOFTNIC
        if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
                RTE_ETH_FOREACH_DEV(pid) {
                        port = &ports[pid];
                        const char *driver = port->dev_info.driver_name;

                        if (strcmp(driver, "net_softnic") == 0)
                                port->softport.fwd_lcore_arg = fwd_lcores;
                }
        }
#endif
}

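/*
 * Refresh the device info of a (possibly hot-plugged) port and flag it
 * for port/queue reconfiguration.
 */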
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
        struct rte_port *port;

        /* Reconfiguration of Ethernet ports. */
        port = &ports[new_port_id];
        rte_eth_dev_info_get(new_port_id, &port->dev_info);

        /* set flag to initialize port/queue */
        port->need_reconfig = 1;
        port->need_reconfig_queues = 1;
        port->socket_id = socket_id;

        init_port_config();
}

int
init_fwd_streams(void)
{
        portid_t pid;
        struct rte_port *port;
        streamid_t sm_id, nb_fwd_streams_new;
        queueid_t q;

        /* set socket id according to numa or not */
        RTE_ETH_FOREACH_DEV(pid) {
                port = &ports[pid];
                if (nb_rxq > port->dev_info.max_rx_queues) {
                        printf("Fail: nb_rxq(%d) is greater than "
                                "max_rx_queues(%d)\n", nb_rxq,
                                port->dev_info.max_rx_queues);
                        return -1;
                }
                if (nb_txq > port->dev_info.max_tx_queues) {
                        printf("Fail: nb_txq(%d) is greater than "
                                "max_tx_queues(%d)\n", nb_txq,
                                port->dev_info.max_tx_queues);
                        return -1;
                }
                if (numa_support) {
                        if (port_numa[pid] != NUMA_NO_CONFIG)
                                port->socket_id = port_numa[pid];
                        else {
                                port->socket_id = rte_eth_dev_socket_id(pid);

                                /* if socket_id is invalid, set to 0 */
                                if (check_socket_id(port->socket_id) < 0)
                                        port->socket_id = 0;
                        }
                } else {
                        if (socket_num == UMA_NO_CONFIG)
                                port->socket_id = 0;
                        else
                                port->socket_id = socket_num;
                }
        }

        q = RTE_MAX(nb_rxq, nb_txq);
        if (q == 0) {
                printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
                return -1;
        }
        nb_fwd_streams_new = (streamid_t)(nb_ports * q);
        if (nb_fwd_streams_new == nb_fwd_streams)
                return 0;
        /* clear the old */
        if (fwd_streams != NULL) {
                for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
                        if (fwd_streams[sm_id] == NULL)
                                continue;
                        rte_free(fwd_streams[sm_id]);
                        fwd_streams[sm_id] = NULL;
                }
                rte_free(fwd_streams);
                fwd_streams = NULL;
        }

        /* init new */
        nb_fwd_streams = nb_fwd_streams_new;
        if (nb_fwd_streams) {
                fwd_streams = rte_zmalloc("testpmd: fwd_streams",
                        sizeof(struct fwd_stream *) * nb_fwd_streams,
                        RTE_CACHE_LINE_SIZE);
                if (fwd_streams == NULL)
                        rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
                                 " (struct fwd_stream *)) failed\n",
                                 nb_fwd_streams);

                for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
                        fwd_streams[sm_id] = rte_zmalloc("testpmd:"
                                " struct fwd_stream", sizeof(struct fwd_stream),
                                RTE_CACHE_LINE_SIZE);
                        if (fwd_streams[sm_id] == NULL)
                                rte_exit(EXIT_FAILURE, "rte_zmalloc"
                                         "(struct fwd_stream) failed\n");
                }
        }

        return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
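/*
 * Display the total number of packet bursts and the distribution of the
 * two most frequent burst sizes, as percentages of all bursts.
 */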
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
        unsigned int total_burst;
        unsigned int nb_burst;
        unsigned int burst_stats[3];
        uint16_t pktnb_stats[3];
        uint16_t nb_pkt;
        int burst_percent[3];

        /*
         * First compute the total number of packet bursts and the
         * two highest numbers of bursts of the same number of packets.
         */
        total_burst = 0;
        burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
        pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
        for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
                nb_burst = pbs->pkt_burst_spread[nb_pkt];
                if (nb_burst == 0)
                        continue;
                total_burst += nb_burst;
                if (nb_burst > burst_stats[0]) {
                        burst_stats[1] = burst_stats[0];
                        pktnb_stats[1] = pktnb_stats[0];
                        burst_stats[0] = nb_burst;
                        pktnb_stats[0] = nb_pkt;
                } else if (nb_burst > burst_stats[1]) {
                        burst_stats[1] = nb_burst;
                        pktnb_stats[1] = nb_pkt;
                }
        }
        if (total_burst == 0)
                return;
        burst_percent[0] = (burst_stats[0] * 100) / total_burst;
        printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
               burst_percent[0], (int) pktnb_stats[0]);
        if (burst_stats[0] == total_burst) {
                printf("]\n");
                return;
        }
        if (burst_stats[0] + burst_stats[1] == total_burst) {
                printf(" + %d%% of %d pkts]\n",
                       100 - burst_percent[0], pktnb_stats[1]);
                return;
        }
        burst_percent[1] = (burst_stats[1] * 100) / total_burst;
        burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
        if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
                printf(" + %d%% of others]\n", 100 - burst_percent[0]);
                return;
        }
        printf(" + %d%% of %d pkts + %d%% of others]\n",
               burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

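/*
 * Display the per-port forwarding statistics, including the bad checksum
 * counters when the csum engine is active and the per-queue counters when
 * queue stats mapping is enabled.
 */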
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
        struct rte_port *port;
        uint8_t i;

        static const char *fwd_stats_border = "----------------------";

        port = &ports[port_id];
        printf("\n  %s Forward statistics for port %-2d %s\n",
               fwd_stats_border, port_id, fwd_stats_border);

        if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
                printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
                       "%-"PRIu64"\n",
                       stats->ipackets, stats->imissed,
                       (uint64_t) (stats->ipackets + stats->imissed));

                if (cur_fwd_eng == &csum_fwd_engine)
                        printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n",
                               port->rx_bad_ip_csum, port->rx_bad_l4_csum,
                               port->rx_bad_outer_l4_csum);
                if ((stats->ierrors + stats->rx_nombuf) > 0) {
                        printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
                        printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
                }

                printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
                       "%-"PRIu64"\n",
                       stats->opackets, port->tx_dropped,
                       (uint64_t) (stats->opackets + port->tx_dropped));
        } else {
                printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
                       "%14"PRIu64"\n",
                       stats->ipackets, stats->imissed,
                       (uint64_t) (stats->ipackets + stats->imissed));

                if (cur_fwd_eng == &csum_fwd_engine)
                        printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"    Bad-outer-l4csum: %-14"PRIu64"\n",
                               port->rx_bad_ip_csum, port->rx_bad_l4_csum,
                               port->rx_bad_outer_l4_csum);
                if ((stats->ierrors + stats->rx_nombuf) > 0) {
                        printf("  RX-error:%"PRIu64"\n", stats->ierrors);
                        printf("  RX-nombufs:             %14"PRIu64"\n",
                               stats->rx_nombuf);
                }

                printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
                       "%14"PRIu64"\n",
                       stats->opackets, port->tx_dropped,
                       (uint64_t) (stats->opackets + port->tx_dropped));
        }

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
        if (port->rx_stream)
                pkt_burst_stats_display("RX",
                        &port->rx_stream->rx_burst_stats);
        if (port->tx_stream)
                pkt_burst_stats_display("TX",
                        &port->tx_stream->tx_burst_stats);
#endif

        if (port->rx_queue_stats_mapping_enabled) {
                printf("\n");
                for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
                        printf("  Stats reg %2d RX-packets:%14"PRIu64
                               "     RX-errors:%14"PRIu64
                               "    RX-bytes:%14"PRIu64"\n",
                               i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
                }
                printf("\n");
        }
        if (port->tx_queue_stats_mapping_enabled) {
                for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
                        printf("  Stats reg %2d TX-packets:%14"PRIu64
                               "                                 TX-bytes:%14"PRIu64"\n",
                               i, stats->q_opackets[i], stats->q_obytes[i]);
                }
        }

        printf("  %s--------------------------------%s\n",
               fwd_stats_border, fwd_stats_border);
}

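/*
 * Display the statistics of one forwarding stream; streams that neither
 * received, sent nor dropped any packet are skipped.
 */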
static void
fwd_stream_stats_display(streamid_t stream_id)
{
        struct fwd_stream *fs;
        static const char *fwd_top_stats_border = "-------";

        fs = fwd_streams[stream_id];
        if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
            (fs->fwd_dropped == 0))
                return;
        printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
               "TX Port=%2d/Queue=%2d %s\n",
               fwd_top_stats_border, fs->rx_port, fs->rx_queue,
               fs->tx_port, fs->tx_queue, fwd_top_stats_border);
        printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
               fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

        /* if checksum mode */
        if (cur_fwd_eng == &csum_fwd_engine) {
                printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
                       "%-14u RX- bad outer L4 checksum: %-14u\n",
                       fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
                       fs->rx_bad_outer_l4_csum);
        }

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
        pkt_burst_stats_display("RX", &fs->rx_burst_stats);
        pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

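/*
 * Drain any packets left in the RX queues of all forwarding ports,
 * making two passes over every queue with a one-second timeout each.
 */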
static void
flush_fwd_rx_queues(void)
{
        struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
        portid_t  rxp;
        portid_t port_id;
        queueid_t rxq;
        uint16_t  nb_rx;
        uint16_t  i;
        uint8_t   j;
        uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
        uint64_t timer_period;

        /* convert to number of cycles */
        timer_period = rte_get_timer_hz(); /* 1 second timeout */

        for (j = 0; j < 2; j++) {
                for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
                        for (rxq = 0; rxq < nb_rxq; rxq++) {
                                port_id = fwd_ports_ids[rxp];
                                /*
                                 * testpmd can get stuck in the do-while loop
                                 * below if rte_eth_rx_burst() always returns
                                 * nonzero packets. So a timer is added to exit
                                 * this loop after the 1-second timer expires.
                                 */
                                prev_tsc = rte_rdtsc();
                                do {
                                        nb_rx = rte_eth_rx_burst(port_id, rxq,
                                                pkts_burst, MAX_PKT_BURST);
                                        for (i = 0; i < nb_rx; i++)
                                                rte_pktmbuf_free(pkts_burst[i]);

                                        cur_tsc = rte_rdtsc();
                                        diff_tsc = cur_tsc - prev_tsc;
                                        timer_tsc += diff_tsc;
                                } while ((nb_rx > 0) &&
                                        (timer_tsc < timer_period));
                                timer_tsc = 0;
                        }
                }
                rte_delay_ms(10); /* wait 10 milliseconds before retrying */
        }
}

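/*
 * Main forwarding loop of a logical core: run the packet forwarding
 * function on every stream assigned to this lcore until it is stopped,
 * updating the bitrate and latency statistics when enabled.
 */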
1468 static void
1469 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1470 {
1471         struct fwd_stream **fsm;
1472         streamid_t nb_fs;
1473         streamid_t sm_id;
1474 #ifdef RTE_LIBRTE_BITRATE
1475         uint64_t tics_per_1sec;
1476         uint64_t tics_datum;
1477         uint64_t tics_current;
1478         uint16_t i, cnt_ports;
1479
1480         cnt_ports = nb_ports;
1481         tics_datum = rte_rdtsc();
1482         tics_per_1sec = rte_get_timer_hz();
1483 #endif
1484         fsm = &fwd_streams[fc->stream_idx];
1485         nb_fs = fc->stream_nb;
1486         do {
1487                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1488                         (*pkt_fwd)(fsm[sm_id]);
1489 #ifdef RTE_LIBRTE_BITRATE
1490                 if (bitrate_enabled != 0 &&
1491                                 bitrate_lcore_id == rte_lcore_id()) {
1492                         tics_current = rte_rdtsc();
1493                         if (tics_current - tics_datum >= tics_per_1sec) {
1494                                 /* Periodic bitrate calculation */
1495                                 for (i = 0; i < cnt_ports; i++)
1496                                         rte_stats_bitrate_calc(bitrate_data,
1497                                                 ports_ids[i]);
1498                                 tics_datum = tics_current;
1499                         }
1500                 }
1501 #endif
1502 #ifdef RTE_LIBRTE_LATENCY_STATS
1503                 if (latencystats_enabled != 0 &&
1504                                 latencystats_lcore_id == rte_lcore_id())
1505                         rte_latencystats_update();
1506 #endif
1507
1508         } while (!fc->stopped);
1509 }
1510
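/* Lcore entry point launched by rte_eal_remote_launch() for continuous forwarding. */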
1511 static int
1512 start_pkt_forward_on_core(void *fwd_arg)
1513 {
1514         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1515                              cur_fwd_config.fwd_eng->packet_fwd);
1516         return 0;
1517 }
1518
1519 /*
1520  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1521  * Used to start communication flows in network loopback test configurations.
1522  */
1523 static int
1524 run_one_txonly_burst_on_core(void *fwd_arg)
1525 {
1526         struct fwd_lcore *fwd_lc;
1527         struct fwd_lcore tmp_lcore;
1528
1529         fwd_lc = (struct fwd_lcore *) fwd_arg;
1530         tmp_lcore = *fwd_lc;
1531         tmp_lcore.stopped = 1;
1532         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1533         return 0;
1534 }
1535
1536 /*
1537  * Launch packet forwarding:
1538  *     - Setup per-port forwarding context.
1539  *     - launch logical cores with their forwarding configuration.
1540  */
1541 static void
1542 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1543 {
1544         port_fwd_begin_t port_fwd_begin;
1545         unsigned int i;
1546         unsigned int lc_id;
1547         int diag;
1548
1549         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1550         if (port_fwd_begin != NULL) {
1551                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1552                         (*port_fwd_begin)(fwd_ports_ids[i]);
1553         }
1554         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1555                 lc_id = fwd_lcores_cpuids[i];
1556                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1557                         fwd_lcores[i]->stopped = 0;
1558                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1559                                                      fwd_lcores[i], lc_id);
1560                         if (diag != 0)
1561                                 printf("launch lcore %u failed - diag=%d\n",
1562                                        lc_id, diag);
1563                 }
1564         }
1565 }
1566
1567 /*
1568  * Update the forwarding ports list: drop invalid ports and append new_pid when valid.
1569  */
1570 void
1571 update_fwd_ports(portid_t new_pid)
1572 {
1573         unsigned int i;
1574         unsigned int new_nb_fwd_ports = 0;
1575         int move = 0;
1576
1577         for (i = 0; i < nb_fwd_ports; ++i) {
1578                 if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
1579                         move = 1;
1580                 else if (move)
1581                         fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
1582                 else
1583                         new_nb_fwd_ports++;
1584         }
1585         if (new_pid < RTE_MAX_ETHPORTS)
1586                 fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
1587
1588         nb_fwd_ports = new_nb_fwd_ports;
1589         nb_cfg_ports = new_nb_fwd_ports;
1590 }
1591
1592 /*
1593  * Set up the packet forwarding configuration and launch all forwarding lcores.
1594  */
1595 void
1596 start_packet_forwarding(int with_tx_first)
1597 {
1598         port_fwd_begin_t port_fwd_begin;
1599         port_fwd_end_t  port_fwd_end;
1600         struct rte_port *port;
1601         unsigned int i;
1602         portid_t   pt_id;
1603         streamid_t sm_id;
1604
1605         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1606                 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1607
1608         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1609                 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1610
1611         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1612                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1613                 (!nb_rxq || !nb_txq))
1614                 rte_exit(EXIT_FAILURE,
1615                         "Either rxq or txq is 0, cannot use %s fwd mode\n",
1616                         cur_fwd_eng->fwd_mode_name);
1617
1618         if (all_ports_started() == 0) {
1619                 printf("Not all ports were started\n");
1620                 return;
1621         }
1622         if (test_done == 0) {
1623                 printf("Packet forwarding already started\n");
1624                 return;
1625         }
1626
1628         if (dcb_test) {
1629                 for (i = 0; i < nb_fwd_ports; i++) {
1630                         pt_id = fwd_ports_ids[i];
1631                         port = &ports[pt_id];
1632                         if (!port->dcb_flag) {
1633                                 printf("In DCB mode, all forwarding ports must "
1634                                        "be configured in this mode.\n");
1635                                 return;
1636                         }
1637                 }
1638                 if (nb_fwd_lcores == 1) {
1639                         printf("In DCB mode, the number of forwarding cores "
1640                                "should be larger than 1.\n");
1641                         return;
1642                 }
1643         }
1644         test_done = 0;
1645
1646         fwd_config_setup();
1647
1648         if (!no_flush_rx)
1649                 flush_fwd_rx_queues();
1650
1651         pkt_fwd_config_display(&cur_fwd_config);
1652         rxtx_config_display();
1653
1654         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1655                 pt_id = fwd_ports_ids[i];
1656                 port = &ports[pt_id];
1657                 rte_eth_stats_get(pt_id, &port->stats);
1658                 port->tx_dropped = 0;
1659
1660                 map_port_queue_stats_mapping_registers(pt_id, port);
1661         }
1662         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1663                 fwd_streams[sm_id]->rx_packets = 0;
1664                 fwd_streams[sm_id]->tx_packets = 0;
1665                 fwd_streams[sm_id]->fwd_dropped = 0;
1666                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1667                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1668                 fwd_streams[sm_id]->rx_bad_outer_l4_csum = 0;
1669
1670 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1671                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1672                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1673                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1674                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1675 #endif
1676 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1677                 fwd_streams[sm_id]->core_cycles = 0;
1678 #endif
1679         }
1680         if (with_tx_first) {
1681                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1682                 if (port_fwd_begin != NULL) {
1683                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1684                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1685                 }
1686                 while (with_tx_first--) {
1687                         launch_packet_forwarding(
1688                                         run_one_txonly_burst_on_core);
1689                         rte_eal_mp_wait_lcore();
1690                 }
1691                 port_fwd_end = tx_only_engine.port_fwd_end;
1692                 if (port_fwd_end != NULL) {
1693                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1694                                 (*port_fwd_end)(fwd_ports_ids[i]);
1695                 }
1696         }
1697         launch_packet_forwarding(start_pkt_forward_on_core);
1698 }
1699
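/*
 * Stop packet forwarding: ask every forwarding lcore to stop, wait for them
 * to finish, fold the per-stream statistics into the port structures and
 * print the accumulated statistics for all ports.
 */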
1700 void
1701 stop_packet_forwarding(void)
1702 {
1703         struct rte_eth_stats stats;
1704         struct rte_port *port;
1705         port_fwd_end_t  port_fwd_end;
1706         int i;
1707         portid_t   pt_id;
1708         streamid_t sm_id;
1709         lcoreid_t  lc_id;
1710         uint64_t total_recv;
1711         uint64_t total_xmit;
1712         uint64_t total_rx_dropped;
1713         uint64_t total_tx_dropped;
1714         uint64_t total_rx_nombuf;
1715         uint64_t tx_dropped;
1716         uint64_t rx_bad_ip_csum;
1717         uint64_t rx_bad_l4_csum;
1718 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1719         uint64_t fwd_cycles;
1720 #endif
1721
1722         static const char *acc_stats_border = "+++++++++++++++";
1723
1724         if (test_done) {
1725                 printf("Packet forwarding not started\n");
1726                 return;
1727         }
1728         printf("Telling cores to stop...");
1729         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1730                 fwd_lcores[lc_id]->stopped = 1;
1731         printf("\nWaiting for lcores to finish...\n");
1732         rte_eal_mp_wait_lcore();
1733         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1734         if (port_fwd_end != NULL) {
1735                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1736                         pt_id = fwd_ports_ids[i];
1737                         (*port_fwd_end)(pt_id);
1738                 }
1739         }
1740 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1741         fwd_cycles = 0;
1742 #endif
1743         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1744                 if (cur_fwd_config.nb_fwd_streams >
1745                     cur_fwd_config.nb_fwd_ports) {
1746                         fwd_stream_stats_display(sm_id);
1747                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1748                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1749                 } else {
1750                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1751                                 fwd_streams[sm_id];
1752                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1753                                 fwd_streams[sm_id];
1754                 }
1755                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1756                 tx_dropped = (uint64_t) (tx_dropped +
1757                                          fwd_streams[sm_id]->fwd_dropped);
1758                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1759
1760                 rx_bad_ip_csum =
1761                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1762                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1763                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1764                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1765                                                         rx_bad_ip_csum;
1766
1767                 rx_bad_l4_csum =
1768                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1769                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1770                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1771                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1772                                                         rx_bad_l4_csum;
1773
1774                 ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum +=
1775                                 fwd_streams[sm_id]->rx_bad_outer_l4_csum;
1776
1777 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1778                 fwd_cycles = (uint64_t) (fwd_cycles +
1779                                          fwd_streams[sm_id]->core_cycles);
1780 #endif
1781         }
1782         total_recv = 0;
1783         total_xmit = 0;
1784         total_rx_dropped = 0;
1785         total_tx_dropped = 0;
1786         total_rx_nombuf  = 0;
1787         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1788                 pt_id = fwd_ports_ids[i];
1789
1790                 port = &ports[pt_id];
1791                 rte_eth_stats_get(pt_id, &stats);
1792                 stats.ipackets -= port->stats.ipackets;
1793                 port->stats.ipackets = 0;
1794                 stats.opackets -= port->stats.opackets;
1795                 port->stats.opackets = 0;
1796                 stats.ibytes   -= port->stats.ibytes;
1797                 port->stats.ibytes = 0;
1798                 stats.obytes   -= port->stats.obytes;
1799                 port->stats.obytes = 0;
1800                 stats.imissed  -= port->stats.imissed;
1801                 port->stats.imissed = 0;
1802                 stats.oerrors  -= port->stats.oerrors;
1803                 port->stats.oerrors = 0;
1804                 stats.rx_nombuf -= port->stats.rx_nombuf;
1805                 port->stats.rx_nombuf = 0;
1806
1807                 total_recv += stats.ipackets;
1808                 total_xmit += stats.opackets;
1809                 total_rx_dropped += stats.imissed;
1810                 total_tx_dropped += port->tx_dropped;
1811                 total_rx_nombuf  += stats.rx_nombuf;
1812
1813                 fwd_port_stats_display(pt_id, &stats);
1814         }
1815
1816         printf("\n  %s Accumulated forward statistics for all ports"
1817                "%s\n",
1818                acc_stats_border, acc_stats_border);
1819         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1820                "%-"PRIu64"\n"
1821                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1822                "%-"PRIu64"\n",
1823                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1824                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1825         if (total_rx_nombuf > 0)
1826                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1827         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1828                "%s\n",
1829                acc_stats_border, acc_stats_border);
1830 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1831         if (total_recv > 0)
1832                 printf("\n  CPU cycles/packet=%u (total cycles="
1833                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1834                        (unsigned int)(fwd_cycles / total_recv),
1835                        fwd_cycles, total_recv);
1836 #endif
1837         printf("\nDone.\n");
1838         test_done = 1;
1839 }
1840
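/* Administratively set the link of the given port up or down. */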
1841 void
1842 dev_set_link_up(portid_t pid)
1843 {
1844         if (rte_eth_dev_set_link_up(pid) < 0)
1845                 printf("\nSet link up fail.\n");
1846 }
1847
1848 void
1849 dev_set_link_down(portid_t pid)
1850 {
1851         if (rte_eth_dev_set_link_down(pid) < 0)
1852                 printf("\nSet link down fail.\n");
1853 }
1854
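/* Return 1 when every port (bonding slaves excepted) is started, 0 otherwise. */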
1855 static int
1856 all_ports_started(void)
1857 {
1858         portid_t pi;
1859         struct rte_port *port;
1860
1861         RTE_ETH_FOREACH_DEV(pi) {
1862                 port = &ports[pi];
1863                 /* Check if there is a port which is not started */
1864                 if ((port->port_status != RTE_PORT_STARTED) &&
1865                         (port->slave_flag == 0))
1866                         return 0;
1867         }
1868
1869         /* All ports are started */
1870         return 1;
1871 }
1872
1873 int
1874 port_is_stopped(portid_t port_id)
1875 {
1876         struct rte_port *port = &ports[port_id];
1877
1878         if ((port->port_status != RTE_PORT_STOPPED) &&
1879             (port->slave_flag == 0))
1880                 return 0;
1881         return 1;
1882 }
1883
1884 int
1885 all_ports_stopped(void)
1886 {
1887         portid_t pi;
1888
1889         RTE_ETH_FOREACH_DEV(pi) {
1890                 if (!port_is_stopped(pi))
1891                         return 0;
1892         }
1893
1894         return 1;
1895 }
1896
1897 int
1898 port_is_started(portid_t port_id)
1899 {
1900         if (port_id_is_invalid(port_id, ENABLED_WARN))
1901                 return 0;
1902
1903         if (ports[port_id].port_status != RTE_PORT_STARTED)
1904                 return 0;
1905
1906         return 1;
1907 }
1908
1909 static int
1910 port_is_closed(portid_t port_id)
1911 {
1912         if (port_id_is_invalid(port_id, ENABLED_WARN))
1913                 return 0;
1914
1915         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1916                 return 0;
1917
1918         return 1;
1919 }
1920
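/*
 * Start the given port, or every port when pid == RTE_PORT_ALL:
 * (re)configure the device and its Rx/Tx queues when needed, start the
 * device, register the ethdev event callbacks and optionally check the
 * link status. Returns 0 on success and -1 on a configuration error.
 */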
1921 int
1922 start_port(portid_t pid)
1923 {
1924         int diag, need_check_link_status = -1;
1925         portid_t pi;
1926         queueid_t qi;
1927         struct rte_port *port;
1928         struct ether_addr mac_addr;
1929         enum rte_eth_event_type event_type;
1930
1931         if (port_id_is_invalid(pid, ENABLED_WARN))
1932                 return 0;
1933
1934         if (dcb_config)
1935                 dcb_test = 1;
1936         RTE_ETH_FOREACH_DEV(pi) {
1937                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1938                         continue;
1939
1940                 need_check_link_status = 0;
1941                 port = &ports[pi];
1942                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1943                                                  RTE_PORT_HANDLING) == 0) {
1944                         printf("Port %d is not stopped\n", pi);
1945                         continue;
1946                 }
1947
1948                 if (port->need_reconfig > 0) {
1949                         port->need_reconfig = 0;
1950
1951                         if (flow_isolate_all) {
1952                                 int ret = port_flow_isolate(pi, 1);
1953                                 if (ret) {
1954                                         printf("Failed to apply isolated"
1955                                                " mode on port %d\n", pi);
1956                                         return -1;
1957                                 }
1958                         }
1959
1960                         printf("Configuring Port %d (socket %u)\n", pi,
1961                                         port->socket_id);
1962                         /* configure port */
1963                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1964                                                 &(port->dev_conf));
1965                         if (diag != 0) {
1966                                 if (rte_atomic16_cmpset(&(port->port_status),
1967                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1968                                         printf("Port %d cannot be set back "
1969                                                         "to stopped\n", pi);
1970                                 printf("Failed to configure port %d\n", pi);
1971                                 /* try to reconfigure port next time */
1972                                 port->need_reconfig = 1;
1973                                 return -1;
1974                         }
1975                 }
1976                 if (port->need_reconfig_queues > 0) {
1977                         port->need_reconfig_queues = 0;
1978                         /* setup tx queues */
1979                         for (qi = 0; qi < nb_txq; qi++) {
1980                                 if ((numa_support) &&
1981                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1982                                         diag = rte_eth_tx_queue_setup(pi, qi,
1983                                                 port->nb_tx_desc[qi],
1984                                                 txring_numa[pi],
1985                                                 &(port->tx_conf[qi]));
1986                                 else
1987                                         diag = rte_eth_tx_queue_setup(pi, qi,
1988                                                 port->nb_tx_desc[qi],
1989                                                 port->socket_id,
1990                                                 &(port->tx_conf[qi]));
1991
1992                                 if (diag == 0)
1993                                         continue;
1994
1995                                 /* Failed to set up a tx queue; return */
1996                                 if (rte_atomic16_cmpset(&(port->port_status),
1997                                                         RTE_PORT_HANDLING,
1998                                                         RTE_PORT_STOPPED) == 0)
1999                                         printf("Port %d cannot be set back "
2000                                                         "to stopped\n", pi);
2001                                 printf("Failed to configure port %d tx queues\n",
2002                                        pi);
2003                                 /* try to reconfigure queues next time */
2004                                 port->need_reconfig_queues = 1;
2005                                 return -1;
2006                         }
2007                         for (qi = 0; qi < nb_rxq; qi++) {
2008                                 /* setup rx queues */
2009                                 if ((numa_support) &&
2010                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2011                                         struct rte_mempool *mp =
2012                                                 mbuf_pool_find(rxring_numa[pi]);
2013                                         if (mp == NULL) {
2014                                                 printf("Failed to set up RX queue: "
2015                                                         "no mempool allocated "
2016                                                         "on socket %d\n",
2017                                                         rxring_numa[pi]);
2018                                                 return -1;
2019                                         }
2020
2021                                         diag = rte_eth_rx_queue_setup(pi, qi,
2022                                              port->nb_rx_desc[qi],
2023                                              rxring_numa[pi],
2024                                              &(port->rx_conf[qi]),
2025                                              mp);
2026                                 } else {
2027                                         struct rte_mempool *mp =
2028                                                 mbuf_pool_find(port->socket_id);
2029                                         if (mp == NULL) {
2030                                                 printf("Failed to set up RX queue: "
2031                                                         "no mempool allocated "
2032                                                         "on socket %d\n",
2033                                                         port->socket_id);
2034                                                 return -1;
2035                                         }
2036                                         diag = rte_eth_rx_queue_setup(pi, qi,
2037                                              port->nb_rx_desc[qi],
2038                                              port->socket_id,
2039                                              &(port->rx_conf[qi]),
2040                                              mp);
2041                                 }
2042                                 if (diag == 0)
2043                                         continue;
2044
2045                                 /* Failed to set up an rx queue; return */
2046                                 if (rte_atomic16_cmpset(&(port->port_status),
2047                                                         RTE_PORT_HANDLING,
2048                                                         RTE_PORT_STOPPED) == 0)
2049                                         printf("Port %d cannot be set back "
2050                                                         "to stopped\n", pi);
2051                                 printf("Failed to configure port %d rx queues\n",
2052                                        pi);
2053                                 /* try to reconfigure queues next time */
2054                                 port->need_reconfig_queues = 1;
2055                                 return -1;
2056                         }
2057                 }
2058
2059                 /* start port */
2060                 if (rte_eth_dev_start(pi) < 0) {
2061                         printf("Failed to start port %d\n", pi);
2062
2063                         /* Failed to start the port; set it back to stopped */
2064                         if (rte_atomic16_cmpset(&(port->port_status),
2065                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2066                                 printf("Port %d cannot be set back to "
2067                                                         "stopped\n", pi);
2068                         continue;
2069                 }
2070
2071                 if (rte_atomic16_cmpset(&(port->port_status),
2072                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2073                         printf("Port %d cannot be set to started\n", pi);
2074
2075                 rte_eth_macaddr_get(pi, &mac_addr);
2076                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2077                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2078                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2079                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2080
2081                 /* at least one port started, need checking link status */
2082                 need_check_link_status = 1;
2083         }
2084
2085         for (event_type = RTE_ETH_EVENT_UNKNOWN;
2086              event_type < RTE_ETH_EVENT_MAX;
2087              event_type++) {
2088                 diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
2089                                                 event_type,
2090                                                 eth_event_callback,
2091                                                 NULL);
2092                 if (diag) {
2093                         printf("Failed to set up event callback for event %d\n",
2094                                 event_type);
2095                         return -1;
2096                 }
2097         }
2098
2099         if (need_check_link_status == 1 && !no_link_check)
2100                 check_all_ports_link_status(RTE_PORT_ALL);
2101         else if (need_check_link_status == 0)
2102                 printf("Please stop the ports first\n");
2103
2104         printf("Done\n");
2105         return 0;
2106 }
2107
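/*
 * Stop the given port, or every port when pid == RTE_PORT_ALL, skipping
 * ports that are still forwarding or act as bonding slaves.
 */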
2108 void
2109 stop_port(portid_t pid)
2110 {
2111         portid_t pi;
2112         struct rte_port *port;
2113         int need_check_link_status = 0;
2114
2115         if (dcb_test) {
2116                 dcb_test = 0;
2117                 dcb_config = 0;
2118         }
2119
2120         if (port_id_is_invalid(pid, ENABLED_WARN))
2121                 return;
2122
2123         printf("Stopping ports...\n");
2124
2125         RTE_ETH_FOREACH_DEV(pi) {
2126                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2127                         continue;
2128
2129                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2130                         printf("Please remove port %d from forwarding configuration.\n", pi);
2131                         continue;
2132                 }
2133
2134                 if (port_is_bonding_slave(pi)) {
2135                         printf("Please remove port %d from bonded device.\n", pi);
2136                         continue;
2137                 }
2138
2139                 port = &ports[pi];
2140                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2141                                                 RTE_PORT_HANDLING) == 0)
2142                         continue;
2143
2144                 rte_eth_dev_stop(pi);
2145
2146                 if (rte_atomic16_cmpset(&(port->port_status),
2147                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2148                         printf("Port %d cannot be set to stopped\n", pi);
2149                 need_check_link_status = 1;
2150         }
2151         if (need_check_link_status && !no_link_check)
2152                 check_all_ports_link_status(RTE_PORT_ALL);
2153
2154         printf("Done\n");
2155 }
2156
2157 void
2158 close_port(portid_t pid)
2159 {
2160         portid_t pi;
2161         struct rte_port *port;
2162
2163         if (port_id_is_invalid(pid, ENABLED_WARN))
2164                 return;
2165
2166         printf("Closing ports...\n");
2167
2168         RTE_ETH_FOREACH_DEV(pi) {
2169                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2170                         continue;
2171
2172                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2173                         printf("Please remove port %d from forwarding configuration.\n", pi);
2174                         continue;
2175                 }
2176
2177                 if (port_is_bonding_slave(pi)) {
2178                         printf("Please remove port %d from bonded device.\n", pi);
2179                         continue;
2180                 }
2181
2182                 port = &ports[pi];
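                /*
                 * A cmpset from RTE_PORT_CLOSED to RTE_PORT_CLOSED does not
                 * change the status; it only reports whether the port is
                 * already closed.
                 */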
2183                 if (rte_atomic16_cmpset(&(port->port_status),
2184                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2185                         printf("Port %d is already closed\n", pi);
2186                         continue;
2187                 }
2188
2189                 if (rte_atomic16_cmpset(&(port->port_status),
2190                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2191                         printf("Port %d is not stopped\n", pi);
2192                         continue;
2193                 }
2194
2195                 if (port->flow_list)
2196                         port_flow_flush(pi);
2197                 rte_eth_dev_close(pi);
2198
2199                 if (rte_atomic16_cmpset(&(port->port_status),
2200                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2201                         printf("Port %d cannot be set to closed\n", pi);
2202         }
2203
2204         printf("Done\n");
2205 }
2206
2207 void
2208 reset_port(portid_t pid)
2209 {
2210         int diag;
2211         portid_t pi;
2212         struct rte_port *port;
2213
2214         if (port_id_is_invalid(pid, ENABLED_WARN))
2215                 return;
2216
2217         printf("Resetting ports...\n");
2218
2219         RTE_ETH_FOREACH_DEV(pi) {
2220                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2221                         continue;
2222
2223                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2224                         printf("Please remove port %d from forwarding "
2225                                "configuration.\n", pi);
2226                         continue;
2227                 }
2228
2229                 if (port_is_bonding_slave(pi)) {
2230                         printf("Please remove port %d from bonded device.\n",
2231                                pi);
2232                         continue;
2233                 }
2234
2235                 diag = rte_eth_dev_reset(pi);
2236                 if (diag == 0) {
2237                         port = &ports[pi];
2238                         port->need_reconfig = 1;
2239                         port->need_reconfig_queues = 1;
2240                 } else {
2241                         printf("Failed to reset port %d. diag=%d\n", pi, diag);
2242                 }
2243         }
2244
2245         printf("Done\n");
2246 }
2247
2248 static int
2249 eth_dev_event_callback_register(void)
2250 {
2251         int ret;
2252
2253         /* register the device event callback */
2254         ret = rte_dev_event_callback_register(NULL,
2255                 eth_dev_event_callback, NULL);
2256         if (ret) {
2257                 printf("Failed to register device event callback\n");
2258                 return -1;
2259         }
2260
2261         return 0;
2262 }
2263
2265 static int
2266 eth_dev_event_callback_unregister(void)
2267 {
2268         int ret;
2269
2270         /* unregister the device event callback */
2271         ret = rte_dev_event_callback_unregister(NULL,
2272                 eth_dev_event_callback, NULL);
2273         if (ret < 0) {
2274                 printf("Failed to unregister device event callback\n");
2275                 return -1;
2276         }
2277
2278         return 0;
2279 }
2280
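/*
 * Hot-plug a port described by a device identifier string, e.g. a PCI
 * address such as "0000:03:00.0" or a vdev argument (illustrative
 * examples only), then add it to the forwarding configuration.
 */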
2281 void
2282 attach_port(char *identifier)
2283 {
2284         portid_t pi = 0;
2285         unsigned int socket_id;
2286
2287         printf("Attaching a new port...\n");
2288
2289         if (identifier == NULL) {
2290                 printf("Invalid parameters are specified\n");
2291                 return;
2292         }
2293
2294         if (rte_eth_dev_attach(identifier, &pi))
2295                 return;
2296
2297         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2298         /* if socket_id is invalid, set to 0 */
2299         if (check_socket_id(socket_id) < 0)
2300                 socket_id = 0;
2301         reconfig(pi, socket_id);
2302         rte_eth_promiscuous_enable(pi);
2303
2304         ports_ids[nb_ports] = pi;
2305         nb_ports = rte_eth_dev_count_avail();
2306
2307         ports[pi].port_status = RTE_PORT_STOPPED;
2308
2309         update_fwd_ports(pi);
2310
2311         printf("Port %d is attached. Total number of ports is now %d\n",
2312                         pi, nb_ports);
2312         printf("Done\n");
2313 }
2314
2315 void
2316 detach_port(portid_t port_id)
2317 {
2318         char name[RTE_ETH_NAME_MAX_LEN];
2319         uint16_t i;
2320
2321         printf("Detaching a port...\n");
2322
2323         if (!port_is_closed(port_id)) {
2324                 printf("Please close port first\n");
2325                 return;
2326         }
2327
2328         if (ports[port_id].flow_list)
2329                 port_flow_flush(port_id);
2330
2331         if (rte_eth_dev_detach(port_id, name)) {
2332                 TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
2333                 return;
2334         }
2335
2336         for (i = 0; i < nb_ports; i++) {
2337                 if (ports_ids[i] == port_id) {
2338                         ports_ids[i] = ports_ids[nb_ports-1];
2339                         ports_ids[nb_ports-1] = 0;
2340                         break;
2341                 }
2342         }
2343         nb_ports = rte_eth_dev_count_avail();
2344
2345         update_fwd_ports(RTE_MAX_ETHPORTS);
2346
2347         printf("Port %u is detached. Total number of ports is now %d\n",
2348                         port_id, nb_ports);
2349         printf("Done\n");
2351 }
2352
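/*
 * Clean up before exiting: stop forwarding if it is running, shut down and
 * close every port, and stop the hot-plug event monitor when it was enabled.
 */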
2353 void
2354 pmd_test_exit(void)
2355 {
2356         struct rte_device *device;
2357         portid_t pt_id;
2358         int ret;
2359
2360         if (test_done == 0)
2361                 stop_packet_forwarding();
2362
2363         if (ports != NULL) {
2364                 no_link_check = 1;
2365                 RTE_ETH_FOREACH_DEV(pt_id) {
2366                         printf("\nShutting down port %d...\n", pt_id);
2367                         fflush(stdout);
2368                         stop_port(pt_id);
2369                         close_port(pt_id);
2370
2371                         /*
2372                          * This is a workaround for a virtio-user issue that
2373                          * requires calling the clean-up routine to remove the
2374                          * existing socket file.
2375                          * The workaround is valid only for testpmd; a fix
2376                          * valid for all applications is still needed.
2377                          * TODO: Implement proper resource cleanup
2378                          */
2379                         device = rte_eth_devices[pt_id].device;
2380                         if (device && !strcmp(device->driver->name, "net_virtio_user"))
2381                                 detach_port(pt_id);
2382                 }
2383         }
2384
2385         if (hot_plug) {
2386                 ret = rte_dev_event_monitor_stop();
2387                 if (ret)
2388                         RTE_LOG(ERR, EAL,
2389                                 "failed to stop the device event monitor.");
2390
2391                 ret = eth_dev_event_callback_unregister();
2392                 if (ret)
2393                         RTE_LOG(ERR, EAL,
2394                                 "failed to unregister all event callbacks.");
2395         }
2396
2397         printf("\nBye...\n");
2398 }
2399
2400 typedef void (*cmd_func_t)(void);
2401 struct pmd_test_command {
2402         const char *cmd_name;
2403         cmd_func_t cmd_func;
2404 };
2405
2406 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2407
2408 /* Check the link status of all ports for up to 9 s, and print the final result */
2409 static void
2410 check_all_ports_link_status(uint32_t port_mask)
2411 {
2412 #define CHECK_INTERVAL 100 /* 100ms */
2413 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2414         portid_t portid;
2415         uint8_t count, all_ports_up, print_flag = 0;
2416         struct rte_eth_link link;
2417
2418         printf("Checking link statuses...\n");
2419         fflush(stdout);
2420         for (count = 0; count <= MAX_CHECK_TIME; count++) {
2421                 all_ports_up = 1;
2422                 RTE_ETH_FOREACH_DEV(portid) {
2423                         if ((port_mask & (1 << portid)) == 0)
2424                                 continue;
2425                         memset(&link, 0, sizeof(link));
2426                         rte_eth_link_get_nowait(portid, &link);
2427                         /* print link status if flag set */
2428                         if (print_flag == 1) {
2429                                 if (link.link_status)
2430                                         printf(
2431                                         "Port %d Link Up. speed %u Mbps - %s\n",
2432                                         portid, link.link_speed,
2433                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2434                                         ("full-duplex") : ("half-duplex"));
2435                                 else
2436                                         printf("Port %d Link Down\n", portid);
2437                                 continue;
2438                         }
2439                         /* clear all_ports_up flag if any link down */
2440                         if (link.link_status == ETH_LINK_DOWN) {
2441                                 all_ports_up = 0;
2442                                 break;
2443                         }
2444                 }
2445                 /* after all link statuses have been printed once, get out */
2446                 if (print_flag == 1)
2447                         break;
2448
2449                 if (all_ports_up == 0) {
2450                         fflush(stdout);
2451                         rte_delay_ms(CHECK_INTERVAL);
2452                 }
2453
2454                 /* set the print_flag if all ports up or timeout */
2455                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2456                         print_flag = 1;
2457                 }
2458
2459                 if (lsc_interrupt)
2460                         break;
2461         }
2462 }
2463
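/*
 * Deferred handler for an RTE_ETH_EVENT_INTR_RMV event: pause forwarding if
 * the removed port is in use, stop, close and detach the port, then resume
 * forwarding when it had to be paused.
 */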
2464 static void
2465 rmv_event_callback(void *arg)
2466 {
2467         int need_to_start = 0;
2468         int org_no_link_check = no_link_check;
2469         portid_t port_id = (intptr_t)arg;
2470
2471         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2472
2473         if (!test_done && port_is_forwarding(port_id)) {
2474                 need_to_start = 1;
2475                 stop_packet_forwarding();
2476         }
2477         no_link_check = 1;
2478         stop_port(port_id);
2479         no_link_check = org_no_link_check;
2480         close_port(port_id);
2481         detach_port(port_id);
2482         if (need_to_start)
2483                 start_packet_forwarding(0);
2484 }
2485
2486 /* This function is used by the interrupt thread */
2487 static int
2488 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2489                   void *ret_param)
2490 {
2491         static const char * const event_desc[] = {
2492                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2493                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2494                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2495                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2496                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2497                 [RTE_ETH_EVENT_IPSEC] = "IPsec",
2498                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2499                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2500                 [RTE_ETH_EVENT_NEW] = "device probed",
2501                 [RTE_ETH_EVENT_DESTROY] = "device released",
2502                 [RTE_ETH_EVENT_MAX] = NULL,
2503         };
2504
2505         RTE_SET_USED(param);
2506         RTE_SET_USED(ret_param);
2507
2508         if (type >= RTE_ETH_EVENT_MAX) {
2509                 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2510                         port_id, __func__, type);
2511                 fflush(stderr);
2512         } else if (event_print_mask & (UINT32_C(1) << type)) {
2513                 printf("\nPort %" PRIu16 ": %s event\n", port_id,
2514                         event_desc[type]);
2515                 fflush(stdout);
2516         }
2517
2518         if (port_id_is_invalid(port_id, DISABLED_WARN))
2519                 return 0;
2520
2521         switch (type) {
2522         case RTE_ETH_EVENT_INTR_RMV:
2523                 if (rte_eal_alarm_set(100000,
2524                                 rmv_event_callback, (void *)(intptr_t)port_id))
2525                         fprintf(stderr, "Could not set up deferred device removal\n");
2526                 break;
2527         default:
2528                 break;
2529         }
2530         return 0;
2531 }
2532
2533 /* This function is used by the interrupt thread */
2534 static void
2535 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2536                              __rte_unused void *arg)
2537 {
2538         if (type >= RTE_DEV_EVENT_MAX) {
2539                 fprintf(stderr, "%s called upon invalid event %d\n",
2540                         __func__, type);
2541                 fflush(stderr);
2542         }
2543
2544         switch (type) {
2545         case RTE_DEV_EVENT_REMOVE:
2546                 RTE_LOG(ERR, EAL, "The device %s has been removed!\n",
2547                         device_name);
2548                 /* TODO: Once failure handling is finished, stop packet
2549                  * forwarding, then stop, close and detach the port.
2550                  */
2551                 break;
2552         case RTE_DEV_EVENT_ADD:
2553                 RTE_LOG(ERR, EAL, "The device %s has been added!\n",
2554                         device_name);
2555                 /* TODO: Once the kernel driver binding is finished,
2556                  * attach the port.
2557                  */
2558                 break;
2559         default:
2560                 break;
2561         }
2562 }
2563
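/*
 * Program the Tx queue statistics counter mappings given on the command line
 * into the registers of the given port; mark the port when at least one
 * mapping was applied.
 */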
2564 static int
2565 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2566 {
2567         uint16_t i;
2568         int diag;
2569         uint8_t mapping_found = 0;
2570
2571         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2572                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2573                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2574                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2575                                         tx_queue_stats_mappings[i].queue_id,
2576                                         tx_queue_stats_mappings[i].stats_counter_id);
2577                         if (diag != 0)
2578                                 return diag;
2579                         mapping_found = 1;
2580                 }
2581         }
2582         if (mapping_found)
2583                 port->tx_queue_stats_mapping_enabled = 1;
2584         return 0;
2585 }
2586
2587 static int
2588 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2589 {
2590         uint16_t i;
2591         int diag;
2592         uint8_t mapping_found = 0;
2593
2594         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2595                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2596                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2597                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2598                                         rx_queue_stats_mappings[i].queue_id,
2599                                         rx_queue_stats_mappings[i].stats_counter_id);
2600                         if (diag != 0)
2601                                 return diag;
2602                         mapping_found = 1;
2603                 }
2604         }
2605         if (mapping_found)
2606                 port->rx_queue_stats_mapping_enabled = 1;
2607         return 0;
2608 }
2609
2610 static void
2611 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2612 {
2613         int diag = 0;
2614
2615         diag = set_tx_queue_stats_mapping_registers(pi, port);
2616         if (diag != 0) {
2617                 if (diag == -ENOTSUP) {
2618                         port->tx_queue_stats_mapping_enabled = 0;
2619                         printf("TX queue stats mapping not supported port id=%d\n", pi);
2620                 } else
2622                         rte_exit(EXIT_FAILURE,
2623                                         "set_tx_queue_stats_mapping_registers "
2624                                         "failed for port id=%d diag=%d\n",
2625                                         pi, diag);
2626         }
2627
2628         diag = set_rx_queue_stats_mapping_registers(pi, port);
2629         if (diag != 0) {
2630                 if (diag == -ENOTSUP) {
2631                         port->rx_queue_stats_mapping_enabled = 0;
2632                         printf("RX queue stats mapping not supported port id=%d\n", pi);
2633                 } else
2635                         rte_exit(EXIT_FAILURE,
2636                                         "set_rx_queue_stats_mapping_registers "
2637                                         "failed for port id=%d diag=%d\n",
2638                                         pi, diag);
2639         }
2640 }
2641
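/*
 * Apply the PMD default Rx/Tx queue configuration to a port and override it
 * with any thresholds passed on the command line.
 */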
2642 static void
2643 rxtx_port_config(struct rte_port *port)
2644 {
2645         uint16_t qid;
2646
2647         for (qid = 0; qid < nb_rxq; qid++) {
2648                 port->rx_conf[qid] = port->dev_info.default_rxconf;
2649
2650                 /* Check if any Rx parameters have been passed */
2651                 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2652                         port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2653
2654                 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2655                         port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2656
2657                 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2658                         port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2659
2660                 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2661                         port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2662
2663                 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2664                         port->rx_conf[qid].rx_drop_en = rx_drop_en;
2665
2666                 port->nb_rx_desc[qid] = nb_rxd;
2667         }
2668
2669         for (qid = 0; qid < nb_txq; qid++) {
2670                 port->tx_conf[qid] = port->dev_info.default_txconf;
2671
2672                 /* Check if any Tx parameters have been passed */
2673                 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2674                         port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2675
2676                 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2677                         port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2678
2679                 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2680                         port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2681
2682                 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2683                         port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2684
2685                 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2686                         port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2687
2688                 port->nb_tx_desc[qid] = nb_txd;
2689         }
2690 }
2691
2692 void
2693 init_port_config(void)
2694 {
2695         portid_t pid;
2696         struct rte_port *port;
2697
2698         RTE_ETH_FOREACH_DEV(pid) {
2699                 port = &ports[pid];
2700                 port->dev_conf.fdir_conf = fdir_conf;
2701                 rte_eth_dev_info_get(pid, &port->dev_info);
2702                 if (nb_rxq > 1) {
2703                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2704                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2705                                 rss_hf & port->dev_info.flow_type_rss_offloads;
2706                 } else {
2707                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2708                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2709                 }
2710
2711                 if (port->dcb_flag == 0) {
2712                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2713                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2714                         else
2715                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2716                 }
2717
2718                 rxtx_port_config(port);
2719
2720                 rte_eth_macaddr_get(pid, &port->eth_addr);
2721
2722                 map_port_queue_stats_mapping_registers(pid, port);
2723 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2724                 rte_pmd_ixgbe_bypass_init(pid);
2725 #endif
2726
2727                 if (lsc_interrupt &&
2728                     (rte_eth_devices[pid].data->dev_flags &
2729                      RTE_ETH_DEV_INTR_LSC))
2730                         port->dev_conf.intr_conf.lsc = 1;
2731                 if (rmv_interrupt &&
2732                     (rte_eth_devices[pid].data->dev_flags &
2733                      RTE_ETH_DEV_INTR_RMV))
2734                         port->dev_conf.intr_conf.rmv = 1;
2735         }
2736 }
2737
2738 void set_port_slave_flag(portid_t slave_pid)
2739 {
2740         struct rte_port *port;
2741
2742         port = &ports[slave_pid];
2743         port->slave_flag = 1;
2744 }
2745
2746 void clear_port_slave_flag(portid_t slave_pid)
2747 {
2748         struct rte_port *port;
2749
2750         port = &ports[slave_pid];
2751         port->slave_flag = 0;
2752 }
2753
2754 uint8_t port_is_bonding_slave(portid_t slave_pid)
2755 {
2756         struct rte_port *port;
2757
2758         port = &ports[slave_pid];
2759         if ((rte_eth_devices[slave_pid].data->dev_flags &
2760             RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2761                 return 1;
2762         return 0;
2763 }
2764
2765 const uint16_t vlan_tags[] = {
2766                 0,  1,  2,  3,  4,  5,  6,  7,
2767                 8,  9, 10, 11,  12, 13, 14, 15,
2768                 16, 17, 18, 19, 20, 21, 22, 23,
2769                 24, 25, 26, 27, 28, 29, 30, 31
2770 };
2771
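/*
 * Fill *eth_conf with a DCB configuration, either combined with virtual
 * pools (VMDq+DCB) or with RSS, for the requested number of traffic
 * classes. A typical call (illustrative values only):
 *     get_eth_dcb_conf(pid, &conf, DCB_ENABLED, ETH_4_TCS, 1);
 */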
2772 static int
2773 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
2774                  enum dcb_mode_enable dcb_mode,
2775                  enum rte_eth_nb_tcs num_tcs,
2776                  uint8_t pfc_en)
2777 {
2778         uint8_t i;
2779         int32_t rc;
2780         struct rte_eth_rss_conf rss_conf;
2781
2782         /*
2783          * Builds up the correct configuration for dcb+vt based on the vlan tags array
2784          * given above, and the number of traffic classes available for use.
2785          */
2786         if (dcb_mode == DCB_VT_ENABLED) {
2787                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2788                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2789                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2790                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2791
2792                 /* VMDQ+DCB RX and TX configurations */
2793                 vmdq_rx_conf->enable_default_pool = 0;
2794                 vmdq_rx_conf->default_pool = 0;
2795                 vmdq_rx_conf->nb_queue_pools =
2796                         (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2797                 vmdq_tx_conf->nb_queue_pools =
2798                         (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2799
2800                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2801                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2802                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2803                         vmdq_rx_conf->pool_map[i].pools =
2804                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
2805                 }
2806                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2807                         vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2808                         vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2809                 }
2810
2811                 /* set DCB mode of RX and TX of multiple queues */
2812                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2813                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2814         } else {
2815                 struct rte_eth_dcb_rx_conf *rx_conf =
2816                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
2817                 struct rte_eth_dcb_tx_conf *tx_conf =
2818                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
2819
2820                 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
2821                 if (rc != 0)
2822                         return rc;
2823
2824                 rx_conf->nb_tcs = num_tcs;
2825                 tx_conf->nb_tcs = num_tcs;
2826
2827                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2828                         rx_conf->dcb_tc[i] = i % num_tcs;
2829                         tx_conf->dcb_tc[i] = i % num_tcs;
2830                 }
2831
2832                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2833                 eth_conf->rx_adv_conf.rss_conf = rss_conf;
2834                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2835         }
2836
2837         if (pfc_en)
2838                 eth_conf->dcb_capability_en =
2839                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2840         else
2841                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2842
2843         return 0;
2844 }
2845
2846 int
2847 init_port_dcb_config(portid_t pid,
2848                      enum dcb_mode_enable dcb_mode,
2849                      enum rte_eth_nb_tcs num_tcs,
2850                      uint8_t pfc_en)
2851 {
2852         struct rte_eth_conf port_conf;
2853         struct rte_port *rte_port;
2854         int retval;
2855         uint16_t i;
2856
2857         rte_port = &ports[pid];
2858
2859         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2860         /* Enter DCB configuration status */
2861         dcb_config = 1;
2862
2863         port_conf.rxmode = rte_port->dev_conf.rxmode;
2864         port_conf.txmode = rte_port->dev_conf.txmode;
2865
2866         /* Set configuration of DCB in VT mode and DCB in non-VT mode */
2867         retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2868         if (retval < 0)
2869                 return retval;
2870         port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2871
2872         /* Re-configure the device. */
2873         rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
2874
2875         rte_eth_dev_info_get(pid, &rte_port->dev_info);
2876
2877         /* If dev_info.vmdq_pool_base is greater than 0,
2878          * the queue IDs of the VMDq pools start after the PF queues.
2879          */
2880         if (dcb_mode == DCB_VT_ENABLED &&
2881             rte_port->dev_info.vmdq_pool_base > 0) {
2882                 printf("VMDQ_DCB multi-queue mode is nonsensical"
2883                         " for port %d.", pid);
2884                 return -1;
2885         }

	/* Assume the ports in testpmd have the same DCB capability
	 * and have the same number of RX and TX queues in DCB mode.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* If VT is disabled, use all PF queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
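	/*
	 * Use a larger RX free threshold so RX descriptors are refilled
	 * in batches (64 appears to be an empirical default here).
	 */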
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}

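/*
 * Allocate the zeroed array of per-port state structures used across
 * the whole application; abort on failure since testpmd cannot run
 * without it.
 */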
static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}
}

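/* Tear down the ports and the prompt; called from the signal handler. */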
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}

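/*
 * Display statistics for every forwarding port. The byte arrays below
 * are standard VT100/ANSI escape sequences: ESC [ 2 J clears the
 * screen and ESC [ 1 ; 1 H homes the cursor to the top-left corner.
 */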
static void
print_stats(void)
{
	uint8_t i;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);
}

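/*
 * On SIGINT/SIGTERM: shut down the optional capture and latency
 * frameworks, stop forwarding, then re-raise the signal with the
 * default handler so the process exits with the conventional status.
 */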
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}

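/*
 * Entry point: initialise the EAL, discover the probed ports, parse
 * testpmd's own arguments, configure and start every port, then either
 * enter the interactive prompt or forward packets until stopped.
 */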
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_panic("Cannot register log type\n");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

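	/* Record the ids of every port already probed by the EAL. */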
	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_BSDAPP
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

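	/*
	 * rte_eal_init() returned the number of arguments it consumed;
	 * skip past them so that launch_args_parse() only sees testpmd's
	 * own options.
	 */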
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				"using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: either RX or TX queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent RSS from being fully tested.\n",
		       nb_rxq, nb_txq);

	init_config();

	if (hot_plug) {
		/* enable hot plug monitoring */
		ret = rte_dev_event_monitor_start();
		if (ret) {
			rte_errno = EINVAL;
			return -1;
		}
		eth_dev_event_callback_register();
	}

	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n", ret);
		else
			printf("Latencystats running on lcore %d\n",
				latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No command line given: starting packet forwarding\n");
		start_packet_forwarding(tx_first);
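		/*
		 * With --stats-period set, print the statistics every
		 * stats_period seconds, using the TSC timer to measure
		 * elapsed time, until a signal sets f_quit.
		 */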
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				prev_time = cur_time;
				/* Sleep to avoid unnecessary checks */
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}