[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
78 #endif
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
81 #endif
82 #include <rte_flow.h>
83 #include <rte_metrics.h>
84 #ifdef RTE_LIBRTE_BITRATE
85 #include <rte_bitrate.h>
86 #endif
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
89 #endif
90
91 #include "testpmd.h"
92
93 uint16_t verbose_level = 0; /**< Silent by default. */
94
95 /* Use the master core for the command line? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
98 char cmdline_filename[PATH_MAX] = {0};
99
100 /*
101  * NUMA support configuration.
102  * When set, NUMA support attempts to dispatch the allocation of the
103  * RX and TX memory rings and of the DMA memory buffers (mbufs) for the
104  * probed ports among CPU sockets 0 and 1.
105  * Otherwise, all memory is allocated from CPU socket 0.
106  */
107 uint8_t numa_support = 1; /**< numa enabled by default */
108
109 /*
110  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
111  * not configured.
112  */
113 uint8_t socket_num = UMA_NO_CONFIG;
114
115 /*
116  * Use ANONYMOUS mapped memory (which may not be physically contiguous) for mbufs.
117  */
118 uint8_t mp_anon = 0;
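
/*
 * Example (illustrative): with the default numa_support = 1, memory for a
 * port is taken from the socket reported for that port, and the
 * --port-numa-config / --ring-numa-config options (see the warning printed
 * in check_socket_id() below) can pin ports and rings explicitly; in UMA
 * mode --socket-num selects the single socket used for all allocations,
 * and mp_anon requests anonymous (not necessarily physically contiguous)
 * memory for the mbuf pools.
 */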
119
120 /*
121  * Record the Ethernet address of peer target ports to which packets are
122  * forwarded.
123  * Must be instantiated with the Ethernet addresses of the peer traffic
124  * generator ports.
125  */
126 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
127 portid_t nb_peer_eth_addrs = 0;
128
129 /*
130  * Probed Target Environment.
131  */
132 struct rte_port *ports;        /**< For all probed ethernet ports. */
133 portid_t nb_ports;             /**< Number of probed ethernet ports. */
134 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
135 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
136
137 /*
138  * Test Forwarding Configuration.
139  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
140  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
141  */
142 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
143 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
144 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
145 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
146
147 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
148 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
149
150 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
151 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
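
/*
 * Example (illustrative): launched with a master core plus 3 worker lcores
 * and 2 probed ports, the defaults give nb_lcores = nb_cfg_lcores = 3,
 * nb_fwd_lcores = 1 and nb_cfg_ports = nb_fwd_ports = 2. At run time the
 * forwarding subset can be changed (e.g. with the interactive
 * "set nb-cores" / "set nb-ports" commands) while always respecting
 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores and
 * nb_fwd_ports <= nb_cfg_ports <= nb_ports.
 */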
152
153 /*
154  * Forwarding engines.
155  */
156 struct fwd_engine * fwd_engines[] = {
157         &io_fwd_engine,
158         &mac_fwd_engine,
159         &mac_swap_engine,
160         &flow_gen_engine,
161         &rx_only_engine,
162         &tx_only_engine,
163         &csum_fwd_engine,
164         &icmp_echo_engine,
165 #ifdef RTE_LIBRTE_IEEE1588
166         &ieee1588_fwd_engine,
167 #endif
168         NULL,
169 };
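
/*
 * Example (illustrative): each engine in the NULL-terminated array above
 * is identified by its fwd_mode_name (start_packet_forwarding() below
 * compares against "rxonly" and "txonly", for instance); selecting the
 * checksum engine makes cur_fwd_eng point at csum_fwd_engine instead of
 * the io_fwd_engine default.
 */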
170
171 struct fwd_config cur_fwd_config;
172 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
173 uint32_t retry_enabled;
174 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
175 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
176
177 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
178 uint32_t param_total_num_mbufs = 0;  /**< Number of mbufs in all pools, if
179                                       * specified on the command line. */
180
181 /*
182  * Configuration of packet segments used by the "txonly" processing engine.
183  */
184 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
185 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
186         TXONLY_DEF_PACKET_LEN,
187 };
188 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
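
/*
 * Example (illustrative): a 3-segment TXONLY packet of 32 + 32 + 64 bytes
 * would be described by tx_pkt_nb_segs = 3, tx_pkt_seg_lengths = {32, 32, 64}
 * and tx_pkt_length = 128; by default a single segment of
 * TXONLY_DEF_PACKET_LEN bytes is sent. The segment layout is typically
 * changed at run time (e.g. with the interactive "set txpkts" command).
 */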
189
190 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
191 /**< Split policy for packets to TX. */
192
193 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
194 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
195
196 /* Whether the current configuration is in DCB mode; 0 means it is not. */
197 uint8_t dcb_config = 0;
198
199 /* Whether DCB is currently being tested. */
200 uint8_t dcb_test = 0;
201
202 /*
203  * Configurable number of RX/TX queues.
204  */
205 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
206 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
207
208 /*
209  * Configurable number of RX/TX ring descriptors.
210  */
211 #define RTE_TEST_RX_DESC_DEFAULT 128
212 #define RTE_TEST_TX_DESC_DEFAULT 512
213 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
214 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
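
/*
 * Example (illustrative): the defaults give 128 RX and 512 TX descriptors
 * per queue; larger rings (e.g. nb_rxd = 1024, nb_txd = 1024, typically set
 * through the --rxd/--txd command-line options) trade memory for tolerance
 * to longer forwarding stalls.
 */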
215
216 #define RTE_PMD_PARAM_UNSET -1
217 /*
218  * Configurable values of RX and TX ring threshold registers.
219  */
220
221 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
222 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
223 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
224
225 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
226 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
227 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
228
229 /*
230  * Configurable value of RX free threshold.
231  */
232 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
233
234 /*
235  * Configurable value of RX drop enable.
236  */
237 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
238
239 /*
240  * Configurable value of TX free threshold.
241  */
242 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
243
244 /*
245  * Configurable value of TX RS bit threshold.
246  */
247 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
248
249 /*
250  * Configurable value of TX queue flags.
251  */
252 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
253
254 /*
255  * Receive Side Scaling (RSS) configuration.
256  */
257 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
258
259 /*
260  * Port topology configuration
261  */
262 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
263
264 /*
265  * Avoid flushing all the RX streams before starting forwarding.
266  */
267 uint8_t no_flush_rx = 0; /* flush by default */
268
269 /*
270  * Avoid checking the link status when starting/stopping a port.
271  */
272 uint8_t no_link_check = 0; /* check by default */
273
274 /*
275  * Enable link status change notification
276  */
277 uint8_t lsc_interrupt = 1; /* enabled by default */
278
279 /*
280  * Enable device removal notification.
281  */
282 uint8_t rmv_interrupt = 1; /* enabled by default */
283
284 /*
285  * NIC bypass mode configuration options.
286  */
287 #ifdef RTE_NIC_BYPASS
288
289 /* The NIC bypass watchdog timeout. */
290 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
291
292 #endif
293
294 #ifdef RTE_LIBRTE_LATENCY_STATS
295
296 /*
297  * Set when latency stats are enabled on the command line.
298  */
299 uint8_t latencystats_enabled;
300
301 /*
302  * Lcore ID to serve latency statistics.
303  */
304 lcoreid_t latencystats_lcore_id = -1;
305
306 #endif
307
308 /*
309  * Ethernet device configuration.
310  */
311 struct rte_eth_rxmode rx_mode = {
312         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
313         .split_hdr_size = 0,
314         .header_split   = 0, /**< Header Split disabled. */
315         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
316         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
317         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
318         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
319         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
320         .hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
321 };
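
/*
 * Example (illustrative): to receive 9000-byte jumbo frames one would set
 * jumbo_frame = 1 and raise max_rx_pkt_len accordingly (e.g. through the
 * --max-pkt-len command-line option); the remaining fields keep hardware
 * VLAN filtering/stripping and CRC stripping enabled as above.
 */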
322
323 struct rte_fdir_conf fdir_conf = {
324         .mode = RTE_FDIR_MODE_NONE,
325         .pballoc = RTE_FDIR_PBALLOC_64K,
326         .status = RTE_FDIR_REPORT_STATUS,
327         .mask = {
328                 .vlan_tci_mask = 0x0,
329                 .ipv4_mask     = {
330                         .src_ip = 0xFFFFFFFF,
331                         .dst_ip = 0xFFFFFFFF,
332                 },
333                 .ipv6_mask     = {
334                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
335                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
336                 },
337                 .src_port_mask = 0xFFFF,
338                 .dst_port_mask = 0xFFFF,
339                 .mac_addr_byte_mask = 0xFF,
340                 .tunnel_type_mask = 1,
341                 .tunnel_id_mask = 0xFFFFFFFF,
342         },
343         .drop_queue = 127,
344 };
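
/*
 * Example (illustrative): flow director is disabled here
 * (RTE_FDIR_MODE_NONE); selecting e.g. RTE_FDIR_MODE_PERFECT (typically via
 * the --pkt-filter-mode command-line option) activates it, with the masks
 * above defining which header fields perfect-match filters compare.
 */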
345
346 volatile int test_done = 1; /* packet forwarding is stopped when set to 1. */
347
348 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
349 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
350
351 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
352 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
353
354 uint16_t nb_tx_queue_stats_mappings = 0;
355 uint16_t nb_rx_queue_stats_mappings = 0;
356
357 unsigned max_socket = 0;
358
359 #ifdef RTE_LIBRTE_BITRATE
360 /* Bitrate statistics */
361 struct rte_stats_bitrates *bitrate_data;
362 lcoreid_t bitrate_lcore_id;
363 uint8_t bitrate_enabled;
364 #endif
365
366 /* Forward function declarations */
367 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
368 static void check_all_ports_link_status(uint32_t port_mask);
369 static void eth_event_callback(uint8_t port_id,
370                                enum rte_eth_event_type type,
371                                void *param);
372
373 /*
374  * Check if all the ports are started.
375  * If yes, return a positive value; if not, return zero.
376  */
377 static int all_ports_started(void);
378
379 /*
380  * Setup default configuration.
381  */
382 static void
383 set_default_fwd_lcores_config(void)
384 {
385         unsigned int i;
386         unsigned int nb_lc;
387         unsigned int sock_num;
388
389         nb_lc = 0;
390         for (i = 0; i < RTE_MAX_LCORE; i++) {
391                 sock_num = rte_lcore_to_socket_id(i) + 1;
392                 if (sock_num > max_socket) {
393                         if (sock_num > RTE_MAX_NUMA_NODES)
394                                 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
395                         max_socket = sock_num;
396                 }
397                 if (!rte_lcore_is_enabled(i))
398                         continue;
399                 if (i == rte_get_master_lcore())
400                         continue;
401                 fwd_lcores_cpuids[nb_lc++] = i;
402         }
403         nb_lcores = (lcoreid_t) nb_lc;
404         nb_cfg_lcores = nb_lcores;
405         nb_fwd_lcores = 1;
406 }
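
/*
 * Example (illustrative): launched with lcores 0-3 enabled and lcore 0 as
 * the master, the loop above yields fwd_lcores_cpuids = {1, 2, 3},
 * nb_lcores = nb_cfg_lcores = 3 and nb_fwd_lcores = 1, while max_socket
 * ends up as the highest socket id seen plus one.
 */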
407
408 static void
409 set_def_peer_eth_addrs(void)
410 {
411         portid_t i;
412
413         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
414                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
415                 peer_eth_addrs[i].addr_bytes[5] = i;
416         }
417 }
418
419 static void
420 set_default_fwd_ports_config(void)
421 {
422         portid_t pt_id;
423
424         for (pt_id = 0; pt_id < nb_ports; pt_id++)
425                 fwd_ports_ids[pt_id] = pt_id;
426
427         nb_cfg_ports = nb_ports;
428         nb_fwd_ports = nb_ports;
429 }
430
431 void
432 set_def_fwd_config(void)
433 {
434         set_default_fwd_lcores_config();
435         set_def_peer_eth_addrs();
436         set_default_fwd_ports_config();
437 }
438
439 /*
440  * Configuration initialisation done once at init time.
441  */
442 static void
443 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
444                  unsigned int socket_id)
445 {
446         char pool_name[RTE_MEMPOOL_NAMESIZE];
447         struct rte_mempool *rte_mp = NULL;
448         uint32_t mb_size;
449
450         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
451         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
452
453         RTE_LOG(INFO, USER1,
454                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
455                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
456
457 #ifdef RTE_LIBRTE_PMD_XENVIRT
458         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
459                 (unsigned) mb_mempool_cache,
460                 sizeof(struct rte_pktmbuf_pool_private),
461                 rte_pktmbuf_pool_init, NULL,
462                 rte_pktmbuf_init, NULL,
463                 socket_id, 0);
464 #endif
465
466         /* If the former XEN allocation failed, fall back to normal allocation. */
467         if (rte_mp == NULL) {
468                 if (mp_anon != 0) {
469                         rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
470                                 mb_size, (unsigned) mb_mempool_cache,
471                                 sizeof(struct rte_pktmbuf_pool_private),
472                                 socket_id, 0);
473                         if (rte_mp == NULL)
474                                 goto err;
475
476                         if (rte_mempool_populate_anon(rte_mp) == 0) {
477                                 rte_mempool_free(rte_mp);
478                                 rte_mp = NULL;
479                                 goto err;
480                         }
481                         rte_pktmbuf_pool_init(rte_mp, NULL);
482                         rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
483                 } else {
484                         /* wrapper to rte_mempool_create() */
485                         rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
486                                 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
487                 }
488         }
489
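        /*
         * Note (descriptive): the err label below is reached on success as
         * well; rte_mp == NULL at this point means every allocation path
         * above failed, so rte_exit() aborts, otherwise the pool is only
         * dumped when verbose_level > 0.
         */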
490 err:
491         if (rte_mp == NULL) {
492                 rte_exit(EXIT_FAILURE,
493                         "Creation of mbuf pool for socket %u failed: %s\n",
494                         socket_id, rte_strerror(rte_errno));
495         } else if (verbose_level > 0) {
496                 rte_mempool_dump(stdout, rte_mp);
497         }
498 }
499
500 /*
501  * Check whether the given socket id is valid in NUMA mode;
502  * if valid, return 0, otherwise return -1.
503  */
504 static int
505 check_socket_id(const unsigned int socket_id)
506 {
507         static int warning_once = 0;
508
509         if (socket_id >= max_socket) {
510                 if (!warning_once && numa_support)
511                         printf("Warning: NUMA should be configured manually by"
512                                " using --port-numa-config and"
513                                " --ring-numa-config parameters along with"
514                                " --numa.\n");
515                 warning_once = 1;
516                 return -1;
517         }
518         return 0;
519 }
520
521 static void
522 init_config(void)
523 {
524         portid_t pid;
525         struct rte_port *port;
526         struct rte_mempool *mbp;
527         unsigned int nb_mbuf_per_pool;
528         lcoreid_t  lc_id;
529         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
530
531         memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
532
533         if (numa_support) {
534                 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
535                 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
536                 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
537         }
538
539         /* Configuration of logical cores. */
540         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
541                                 sizeof(struct fwd_lcore *) * nb_lcores,
542                                 RTE_CACHE_LINE_SIZE);
543         if (fwd_lcores == NULL) {
544                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
545                                                         "failed\n", nb_lcores);
546         }
547         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
548                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
549                                                sizeof(struct fwd_lcore),
550                                                RTE_CACHE_LINE_SIZE);
551                 if (fwd_lcores[lc_id] == NULL) {
552                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
553                                                                 "failed\n");
554                 }
555                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
556         }
557
558         RTE_ETH_FOREACH_DEV(pid) {
559                 port = &ports[pid];
560                 rte_eth_dev_info_get(pid, &port->dev_info);
561
562                 if (numa_support) {
563                         if (port_numa[pid] != NUMA_NO_CONFIG)
564                                 port_per_socket[port_numa[pid]]++;
565                         else {
566                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
567
568                                 /* if socket_id is invalid, set to 0 */
569                                 if (check_socket_id(socket_id) < 0)
570                                         socket_id = 0;
571                                 port_per_socket[socket_id]++;
572                         }
573                 }
574
575                 /* set flag to initialize port/queue */
576                 port->need_reconfig = 1;
577                 port->need_reconfig_queues = 1;
578         }
579
580         /*
581          * Create pools of mbufs.
582          * If NUMA support is disabled, create a single pool of mbufs in
583          * socket 0 memory by default.
584          * Otherwise, create a pool of mbufs in the memory of each of sockets 0 and 1.
585          *
586          * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd and
587          * nb_txd can still be configured at run time.
588          */
589         if (param_total_num_mbufs)
590                 nb_mbuf_per_pool = param_total_num_mbufs;
591         else {
592                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
593                         (nb_lcores * mb_mempool_cache) +
594                         RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
595                 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
596         }
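        /*
         * Illustrative sizing, assuming the testpmd.h defaults
         * RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
         * MAX_PKT_BURST = 512 and mb_mempool_cache = 250 (these exact
         * values are assumptions, check testpmd.h): with 3 lcores the
         * per-pool count is 2048 + 3 * 250 + 2048 + 512 = 5358 mbufs,
         * multiplied by RTE_MAX_ETHPORTS; when --total-num-mbufs is given
         * on the command line it replaces this estimate entirely.
         */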
597
598         if (numa_support) {
599                 uint8_t i;
600
601                 for (i = 0; i < max_socket; i++)
602                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, i);
603         } else {
604                 if (socket_num == UMA_NO_CONFIG)
605                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
606                 else
607                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
608                                                  socket_num);
609         }
610
611         init_port_config();
612
613         /*
614          * Record which mbuf pool each logical core should use, if needed.
615          */
616         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
617                 mbp = mbuf_pool_find(
618                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
619
620                 if (mbp == NULL)
621                         mbp = mbuf_pool_find(0);
622                 fwd_lcores[lc_id]->mbp = mbp;
623         }
624
625         /* Configuration of packet forwarding streams. */
626         if (init_fwd_streams() < 0)
627                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
628
629         fwd_config_setup();
630 }
631
632
633 void
634 reconfig(portid_t new_port_id, unsigned socket_id)
635 {
636         struct rte_port *port;
637
638         /* Reconfiguration of Ethernet ports. */
639         port = &ports[new_port_id];
640         rte_eth_dev_info_get(new_port_id, &port->dev_info);
641
642         /* set flag to initialize port/queue */
643         port->need_reconfig = 1;
644         port->need_reconfig_queues = 1;
645         port->socket_id = socket_id;
646
647         init_port_config();
648 }
649
650
651 int
652 init_fwd_streams(void)
653 {
654         portid_t pid;
655         struct rte_port *port;
656         streamid_t sm_id, nb_fwd_streams_new;
657         queueid_t q;
658
659         /* Set the socket id according to whether NUMA is enabled. */
660         RTE_ETH_FOREACH_DEV(pid) {
661                 port = &ports[pid];
662                 if (nb_rxq > port->dev_info.max_rx_queues) {
663                         printf("Fail: nb_rxq(%d) is greater than "
664                                 "max_rx_queues(%d)\n", nb_rxq,
665                                 port->dev_info.max_rx_queues);
666                         return -1;
667                 }
668                 if (nb_txq > port->dev_info.max_tx_queues) {
669                         printf("Fail: nb_txq(%d) is greater than "
670                                 "max_tx_queues(%d)\n", nb_txq,
671                                 port->dev_info.max_tx_queues);
672                         return -1;
673                 }
674                 if (numa_support) {
675                         if (port_numa[pid] != NUMA_NO_CONFIG)
676                                 port->socket_id = port_numa[pid];
677                         else {
678                                 port->socket_id = rte_eth_dev_socket_id(pid);
679
680                                 /* if socket_id is invalid, set to 0 */
681                                 if (check_socket_id(port->socket_id) < 0)
682                                         port->socket_id = 0;
683                         }
684                 }
685                 else {
686                         if (socket_num == UMA_NO_CONFIG)
687                                 port->socket_id = 0;
688                         else
689                                 port->socket_id = socket_num;
690                 }
691         }
692
693         q = RTE_MAX(nb_rxq, nb_txq);
694         if (q == 0) {
695                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
696                 return -1;
697         }
698         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
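        /*
         * Example (illustrative): 2 forwarding ports with nb_rxq = 4 and
         * nb_txq = 1 give q = 4 and nb_fwd_streams_new = 2 * 4 = 8; if that
         * differs from the current nb_fwd_streams, the fwd_streams array is
         * reallocated below to hold 8 entries.
         */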
699         if (nb_fwd_streams_new == nb_fwd_streams)
700                 return 0;
701         /* clear the old */
702         if (fwd_streams != NULL) {
703                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
704                         if (fwd_streams[sm_id] == NULL)
705                                 continue;
706                         rte_free(fwd_streams[sm_id]);
707                         fwd_streams[sm_id] = NULL;
708                 }
709                 rte_free(fwd_streams);
710                 fwd_streams = NULL;
711         }
712
713         /* init new */
714         nb_fwd_streams = nb_fwd_streams_new;
715         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
716                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
717         if (fwd_streams == NULL)
718                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
719                                                 "failed\n", nb_fwd_streams);
720
721         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
722                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
723                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
724                 if (fwd_streams[sm_id] == NULL)
725                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
726                                                                 " failed\n");
727         }
728
729         return 0;
730 }
731
732 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
733 static void
734 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
735 {
736         unsigned int total_burst;
737         unsigned int nb_burst;
738         unsigned int burst_stats[3];
739         uint16_t pktnb_stats[3];
740         uint16_t nb_pkt;
741         int burst_percent[3];
742
743         /*
744          * First compute the total number of packet bursts and the
745          * two highest numbers of bursts of the same number of packets.
746          */
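        /*
         * Example (illustrative): out of 100 recorded bursts, 70 of 32
         * packets, 20 of 16 packets and 10 of other sizes would be printed
         * as "RX-bursts : 100 [70% of 32 pkts + 20% of 16 pkts + 10% of
         * others]".
         */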
747         total_burst = 0;
748         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
749         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
750         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
751                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
752                 if (nb_burst == 0)
753                         continue;
754                 total_burst += nb_burst;
755                 if (nb_burst > burst_stats[0]) {
756                         burst_stats[1] = burst_stats[0];
757                         pktnb_stats[1] = pktnb_stats[0];
758                         burst_stats[0] = nb_burst;
759                         pktnb_stats[0] = nb_pkt;
760                 }
761         }
762         if (total_burst == 0)
763                 return;
764         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
765         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
766                burst_percent[0], (int) pktnb_stats[0]);
767         if (burst_stats[0] == total_burst) {
768                 printf("]\n");
769                 return;
770         }
771         if (burst_stats[0] + burst_stats[1] == total_burst) {
772                 printf(" + %d%% of %d pkts]\n",
773                        100 - burst_percent[0], pktnb_stats[1]);
774                 return;
775         }
776         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
777         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
778         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
779                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
780                 return;
781         }
782         printf(" + %d%% of %d pkts + %d%% of others]\n",
783                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
784 }
785 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
786
787 static void
788 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
789 {
790         struct rte_port *port;
791         uint8_t i;
792
793         static const char *fwd_stats_border = "----------------------";
794
795         port = &ports[port_id];
796         printf("\n  %s Forward statistics for port %-2d %s\n",
797                fwd_stats_border, port_id, fwd_stats_border);
798
799         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
800                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
801                        "%-"PRIu64"\n",
802                        stats->ipackets, stats->imissed,
803                        (uint64_t) (stats->ipackets + stats->imissed));
804
805                 if (cur_fwd_eng == &csum_fwd_engine)
806                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
807                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
808                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
809                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
810                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
811                 }
812
813                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
814                        "%-"PRIu64"\n",
815                        stats->opackets, port->tx_dropped,
816                        (uint64_t) (stats->opackets + port->tx_dropped));
817         }
818         else {
819                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
820                        "%14"PRIu64"\n",
821                        stats->ipackets, stats->imissed,
822                        (uint64_t) (stats->ipackets + stats->imissed));
823
824                 if (cur_fwd_eng == &csum_fwd_engine)
825                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
826                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
827                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
828                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
829                         printf("  RX-nombufs:             %14"PRIu64"\n",
830                                stats->rx_nombuf);
831                 }
832
833                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
834                        "%14"PRIu64"\n",
835                        stats->opackets, port->tx_dropped,
836                        (uint64_t) (stats->opackets + port->tx_dropped));
837         }
838
839 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
840         if (port->rx_stream)
841                 pkt_burst_stats_display("RX",
842                         &port->rx_stream->rx_burst_stats);
843         if (port->tx_stream)
844                 pkt_burst_stats_display("TX",
845                         &port->tx_stream->tx_burst_stats);
846 #endif
847
848         if (port->rx_queue_stats_mapping_enabled) {
849                 printf("\n");
850                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
851                         printf("  Stats reg %2d RX-packets:%14"PRIu64
852                                "     RX-errors:%14"PRIu64
853                                "    RX-bytes:%14"PRIu64"\n",
854                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
855                 }
856                 printf("\n");
857         }
858         if (port->tx_queue_stats_mapping_enabled) {
859                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
860                         printf("  Stats reg %2d TX-packets:%14"PRIu64
861                                "                                 TX-bytes:%14"PRIu64"\n",
862                                i, stats->q_opackets[i], stats->q_obytes[i]);
863                 }
864         }
865
866         printf("  %s--------------------------------%s\n",
867                fwd_stats_border, fwd_stats_border);
868 }
869
870 static void
871 fwd_stream_stats_display(streamid_t stream_id)
872 {
873         struct fwd_stream *fs;
874         static const char *fwd_top_stats_border = "-------";
875
876         fs = fwd_streams[stream_id];
877         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
878             (fs->fwd_dropped == 0))
879                 return;
880         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
881                "TX Port=%2d/Queue=%2d %s\n",
882                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
883                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
884         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
885                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
886
887         /* if checksum mode */
888         if (cur_fwd_eng == &csum_fwd_engine) {
889                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
890                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
891         }
892
893 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
894         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
895         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
896 #endif
897 }
898
899 static void
900 flush_fwd_rx_queues(void)
901 {
902         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
903         portid_t  rxp;
904         portid_t port_id;
905         queueid_t rxq;
906         uint16_t  nb_rx;
907         uint16_t  i;
908         uint8_t   j;
909         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
910         uint64_t timer_period;
911
912         /* convert to number of cycles */
913         timer_period = rte_get_timer_hz(); /* 1 second timeout */
914
915         for (j = 0; j < 2; j++) {
916                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
917                         for (rxq = 0; rxq < nb_rxq; rxq++) {
918                                 port_id = fwd_ports_ids[rxp];
919                                 /*
920                                  * testpmd can get stuck in the do-while loop
921                                  * below if rte_eth_rx_burst() always returns
922                                  * a nonzero number of packets, so a timer is
923                                  * added to exit the loop after 1 second.
924                                  */
925                                 prev_tsc = rte_rdtsc();
926                                 do {
927                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
928                                                 pkts_burst, MAX_PKT_BURST);
929                                         for (i = 0; i < nb_rx; i++)
930                                                 rte_pktmbuf_free(pkts_burst[i]);
931
932                                         cur_tsc = rte_rdtsc();
933                                         diff_tsc = cur_tsc - prev_tsc;
934                                         timer_tsc += diff_tsc;
935                                 } while ((nb_rx > 0) &&
936                                         (timer_tsc < timer_period));
937                                 timer_tsc = 0;
938                         }
939                 }
940                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
941         }
942 }
943
944 static void
945 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
946 {
947         struct fwd_stream **fsm;
948         streamid_t nb_fs;
949         streamid_t sm_id;
950 #ifdef RTE_LIBRTE_BITRATE
951         uint64_t tics_per_1sec;
952         uint64_t tics_datum;
953         uint64_t tics_current;
954         uint8_t idx_port, cnt_ports;
955
956         cnt_ports = rte_eth_dev_count();
957         tics_datum = rte_rdtsc();
958         tics_per_1sec = rte_get_timer_hz();
959 #endif
960         fsm = &fwd_streams[fc->stream_idx];
961         nb_fs = fc->stream_nb;
962         do {
963                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
964                         (*pkt_fwd)(fsm[sm_id]);
965 #ifdef RTE_LIBRTE_BITRATE
966                 if (bitrate_enabled != 0 &&
967                                 bitrate_lcore_id == rte_lcore_id()) {
968                         tics_current = rte_rdtsc();
969                         if (tics_current - tics_datum >= tics_per_1sec) {
970                                 /* Periodic bitrate calculation */
971                                 for (idx_port = 0;
972                                                 idx_port < cnt_ports;
973                                                 idx_port++)
974                                         rte_stats_bitrate_calc(bitrate_data,
975                                                 idx_port);
976                                 tics_datum = tics_current;
977                         }
978                 }
979 #endif
980 #ifdef RTE_LIBRTE_LATENCY_STATS
981                 if (latencystats_enabled != 0 &&
982                                 latencystats_lcore_id == rte_lcore_id())
983                         rte_latencystats_update();
984 #endif
985
986         } while (! fc->stopped);
987 }
988
989 static int
990 start_pkt_forward_on_core(void *fwd_arg)
991 {
992         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
993                              cur_fwd_config.fwd_eng->packet_fwd);
994         return 0;
995 }
996
997 /*
998  * Run the TXONLY packet forwarding engine to send a single burst of packets.
999  * Used to start communication flows in network loopback test configurations.
1000  */
1001 static int
1002 run_one_txonly_burst_on_core(void *fwd_arg)
1003 {
1004         struct fwd_lcore *fwd_lc;
1005         struct fwd_lcore tmp_lcore;
1006
1007         fwd_lc = (struct fwd_lcore *) fwd_arg;
1008         tmp_lcore = *fwd_lc;
1009         tmp_lcore.stopped = 1;
1010         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1011         return 0;
1012 }
1013
1014 /*
1015  * Launch packet forwarding:
1016  *     - Setup per-port forwarding context.
1017  *     - launch logical cores with their forwarding configuration.
1018  */
1019 static void
1020 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1021 {
1022         port_fwd_begin_t port_fwd_begin;
1023         unsigned int i;
1024         unsigned int lc_id;
1025         int diag;
1026
1027         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1028         if (port_fwd_begin != NULL) {
1029                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1030                         (*port_fwd_begin)(fwd_ports_ids[i]);
1031         }
1032         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1033                 lc_id = fwd_lcores_cpuids[i];
1034                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1035                         fwd_lcores[i]->stopped = 0;
1036                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1037                                                      fwd_lcores[i], lc_id);
1038                         if (diag != 0)
1039                                 printf("launch lcore %u failed - diag=%d\n",
1040                                        lc_id, diag);
1041                 }
1042         }
1043 }
1044
1045 /*
1046  * Launch packet forwarding configuration.
1047  */
1048 void
1049 start_packet_forwarding(int with_tx_first)
1050 {
1051         port_fwd_begin_t port_fwd_begin;
1052         port_fwd_end_t  port_fwd_end;
1053         struct rte_port *port;
1054         unsigned int i;
1055         portid_t   pt_id;
1056         streamid_t sm_id;
1057
1058         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1059                 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1060
1061         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1062                 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1063
1064         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1065                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1066                 (!nb_rxq || !nb_txq))
1067                 rte_exit(EXIT_FAILURE,
1068                         "Either rxq or txq are 0, cannot use %s fwd mode\n",
1069                         cur_fwd_eng->fwd_mode_name);
1070
1071         if (all_ports_started() == 0) {
1072                 printf("Not all ports were started\n");
1073                 return;
1074         }
1075         if (test_done == 0) {
1076                 printf("Packet forwarding already started\n");
1077                 return;
1078         }
1079
1080         if (init_fwd_streams() < 0) {
1081                 printf("Fail from init_fwd_streams()\n");
1082                 return;
1083         }
1084
1085         if(dcb_test) {
1086                 for (i = 0; i < nb_fwd_ports; i++) {
1087                         pt_id = fwd_ports_ids[i];
1088                         port = &ports[pt_id];
1089                         if (!port->dcb_flag) {
1090                                 printf("In DCB mode, all forwarding ports must "
1091                                        "be configured in this mode.\n");
1092                                 return;
1093                         }
1094                 }
1095                 if (nb_fwd_lcores == 1) {
1096                         printf("In DCB mode,the nb forwarding cores "
1097                                "should be larger than 1.\n");
1098                         return;
1099                 }
1100         }
1101         test_done = 0;
1102
1103         if(!no_flush_rx)
1104                 flush_fwd_rx_queues();
1105
1106         fwd_config_setup();
1107         pkt_fwd_config_display(&cur_fwd_config);
1108         rxtx_config_display();
1109
1110         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1111                 pt_id = fwd_ports_ids[i];
1112                 port = &ports[pt_id];
1113                 rte_eth_stats_get(pt_id, &port->stats);
1114                 port->tx_dropped = 0;
1115
1116                 map_port_queue_stats_mapping_registers(pt_id, port);
1117         }
1118         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1119                 fwd_streams[sm_id]->rx_packets = 0;
1120                 fwd_streams[sm_id]->tx_packets = 0;
1121                 fwd_streams[sm_id]->fwd_dropped = 0;
1122                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1123                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1124
1125 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1126                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1127                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1128                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1129                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1130 #endif
1131 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1132                 fwd_streams[sm_id]->core_cycles = 0;
1133 #endif
1134         }
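        /*
         * Illustrative note: with_tx_first (e.g. from the interactive
         * "start tx_first" command) first runs the TXONLY engine for the
         * requested number of rounds on every forwarding core, waits for
         * them to finish, and only then launches the configured forwarding
         * engine below.
         */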
1135         if (with_tx_first) {
1136                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1137                 if (port_fwd_begin != NULL) {
1138                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1139                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1140                 }
1141                 while (with_tx_first--) {
1142                         launch_packet_forwarding(
1143                                         run_one_txonly_burst_on_core);
1144                         rte_eal_mp_wait_lcore();
1145                 }
1146                 port_fwd_end = tx_only_engine.port_fwd_end;
1147                 if (port_fwd_end != NULL) {
1148                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1149                                 (*port_fwd_end)(fwd_ports_ids[i]);
1150                 }
1151         }
1152         launch_packet_forwarding(start_pkt_forward_on_core);
1153 }
1154
1155 void
1156 stop_packet_forwarding(void)
1157 {
1158         struct rte_eth_stats stats;
1159         struct rte_port *port;
1160         port_fwd_end_t  port_fwd_end;
1161         int i;
1162         portid_t   pt_id;
1163         streamid_t sm_id;
1164         lcoreid_t  lc_id;
1165         uint64_t total_recv;
1166         uint64_t total_xmit;
1167         uint64_t total_rx_dropped;
1168         uint64_t total_tx_dropped;
1169         uint64_t total_rx_nombuf;
1170         uint64_t tx_dropped;
1171         uint64_t rx_bad_ip_csum;
1172         uint64_t rx_bad_l4_csum;
1173 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1174         uint64_t fwd_cycles;
1175 #endif
1176         static const char *acc_stats_border = "+++++++++++++++";
1177
1178         if (test_done) {
1179                 printf("Packet forwarding not started\n");
1180                 return;
1181         }
1182         printf("Telling cores to stop...");
1183         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1184                 fwd_lcores[lc_id]->stopped = 1;
1185         printf("\nWaiting for lcores to finish...\n");
1186         rte_eal_mp_wait_lcore();
1187         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1188         if (port_fwd_end != NULL) {
1189                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1190                         pt_id = fwd_ports_ids[i];
1191                         (*port_fwd_end)(pt_id);
1192                 }
1193         }
1194 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1195         fwd_cycles = 0;
1196 #endif
1197         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1198                 if (cur_fwd_config.nb_fwd_streams >
1199                     cur_fwd_config.nb_fwd_ports) {
1200                         fwd_stream_stats_display(sm_id);
1201                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1202                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1203                 } else {
1204                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1205                                 fwd_streams[sm_id];
1206                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1207                                 fwd_streams[sm_id];
1208                 }
1209                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1210                 tx_dropped = (uint64_t) (tx_dropped +
1211                                          fwd_streams[sm_id]->fwd_dropped);
1212                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1213
1214                 rx_bad_ip_csum =
1215                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1216                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1217                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1218                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1219                                                         rx_bad_ip_csum;
1220
1221                 rx_bad_l4_csum =
1222                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1223                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1224                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1225                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1226                                                         rx_bad_l4_csum;
1227
1228 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1229                 fwd_cycles = (uint64_t) (fwd_cycles +
1230                                          fwd_streams[sm_id]->core_cycles);
1231 #endif
1232         }
1233         total_recv = 0;
1234         total_xmit = 0;
1235         total_rx_dropped = 0;
1236         total_tx_dropped = 0;
1237         total_rx_nombuf  = 0;
1238         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1239                 pt_id = fwd_ports_ids[i];
1240
1241                 port = &ports[pt_id];
1242                 rte_eth_stats_get(pt_id, &stats);
1243                 stats.ipackets -= port->stats.ipackets;
1244                 port->stats.ipackets = 0;
1245                 stats.opackets -= port->stats.opackets;
1246                 port->stats.opackets = 0;
1247                 stats.ibytes   -= port->stats.ibytes;
1248                 port->stats.ibytes = 0;
1249                 stats.obytes   -= port->stats.obytes;
1250                 port->stats.obytes = 0;
1251                 stats.imissed  -= port->stats.imissed;
1252                 port->stats.imissed = 0;
1253                 stats.oerrors  -= port->stats.oerrors;
1254                 port->stats.oerrors = 0;
1255                 stats.rx_nombuf -= port->stats.rx_nombuf;
1256                 port->stats.rx_nombuf = 0;
1257
1258                 total_recv += stats.ipackets;
1259                 total_xmit += stats.opackets;
1260                 total_rx_dropped += stats.imissed;
1261                 total_tx_dropped += port->tx_dropped;
1262                 total_rx_nombuf  += stats.rx_nombuf;
1263
1264                 fwd_port_stats_display(pt_id, &stats);
1265         }
1266         printf("\n  %s Accumulated forward statistics for all ports"
1267                "%s\n",
1268                acc_stats_border, acc_stats_border);
1269         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1270                "%-"PRIu64"\n"
1271                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1272                "%-"PRIu64"\n",
1273                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1274                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1275         if (total_rx_nombuf > 0)
1276                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1277         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1278                "%s\n",
1279                acc_stats_border, acc_stats_border);
1280 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1281         if (total_recv > 0)
1282                 printf("\n  CPU cycles/packet=%u (total cycles="
1283                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1284                        (unsigned int)(fwd_cycles / total_recv),
1285                        fwd_cycles, total_recv);
1286 #endif
1287         printf("\nDone.\n");
1288         test_done = 1;
1289 }
1290
1291 void
1292 dev_set_link_up(portid_t pid)
1293 {
1294         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1295                 printf("\nSet link up fail.\n");
1296 }
1297
1298 void
1299 dev_set_link_down(portid_t pid)
1300 {
1301         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1302                 printf("\nSet link down fail.\n");
1303 }
1304
1305 static int
1306 all_ports_started(void)
1307 {
1308         portid_t pi;
1309         struct rte_port *port;
1310
1311         RTE_ETH_FOREACH_DEV(pi) {
1312                 port = &ports[pi];
1313                 /* Check if there is a port which is not started */
1314                 if ((port->port_status != RTE_PORT_STARTED) &&
1315                         (port->slave_flag == 0))
1316                         return 0;
1317         }
1318
1319         /* All (non-slave) ports are started. */
1320         return 1;
1321 }
1322
1323 int
1324 all_ports_stopped(void)
1325 {
1326         portid_t pi;
1327         struct rte_port *port;
1328
1329         RTE_ETH_FOREACH_DEV(pi) {
1330                 port = &ports[pi];
1331                 if ((port->port_status != RTE_PORT_STOPPED) &&
1332                         (port->slave_flag == 0))
1333                         return 0;
1334         }
1335
1336         return 1;
1337 }
1338
1339 int
1340 port_is_started(portid_t port_id)
1341 {
1342         if (port_id_is_invalid(port_id, ENABLED_WARN))
1343                 return 0;
1344
1345         if (ports[port_id].port_status != RTE_PORT_STARTED)
1346                 return 0;
1347
1348         return 1;
1349 }
1350
1351 static int
1352 port_is_closed(portid_t port_id)
1353 {
1354         if (port_id_is_invalid(port_id, ENABLED_WARN))
1355                 return 0;
1356
1357         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1358                 return 0;
1359
1360         return 1;
1361 }
1362
1363 int
1364 start_port(portid_t pid)
1365 {
1366         int diag, need_check_link_status = -1;
1367         portid_t pi;
1368         queueid_t qi;
1369         struct rte_port *port;
1370         struct ether_addr mac_addr;
1371         enum rte_eth_event_type event_type;
1372
1373         if (port_id_is_invalid(pid, ENABLED_WARN))
1374                 return 0;
1375
1376         if(dcb_config)
1377                 dcb_test = 1;
1378         RTE_ETH_FOREACH_DEV(pi) {
1379                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1380                         continue;
1381
1382                 need_check_link_status = 0;
1383                 port = &ports[pi];
1384                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1385                                                  RTE_PORT_HANDLING) == 0) {
1386                         printf("Port %d is now not stopped\n", pi);
1387                         continue;
1388                 }
1389
1390                 if (port->need_reconfig > 0) {
1391                         port->need_reconfig = 0;
1392
1393                         printf("Configuring Port %d (socket %u)\n", pi,
1394                                         port->socket_id);
1395                         /* configure port */
1396                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1397                                                 &(port->dev_conf));
1398                         if (diag != 0) {
1399                                 if (rte_atomic16_cmpset(&(port->port_status),
1400                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1401                                         printf("Port %d can not be set back "
1402                                                         "to stopped\n", pi);
1403                                 printf("Fail to configure port %d\n", pi);
1404                                 /* try to reconfigure port next time */
1405                                 port->need_reconfig = 1;
1406                                 return -1;
1407                         }
1408                 }
1409                 if (port->need_reconfig_queues > 0) {
1410                         port->need_reconfig_queues = 0;
1411                         /* setup tx queues */
1412                         for (qi = 0; qi < nb_txq; qi++) {
1413                                 if ((numa_support) &&
1414                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1415                                         diag = rte_eth_tx_queue_setup(pi, qi,
1416                                                 nb_txd,txring_numa[pi],
1417                                                 &(port->tx_conf));
1418                                 else
1419                                         diag = rte_eth_tx_queue_setup(pi, qi,
1420                                                 nb_txd,port->socket_id,
1421                                                 &(port->tx_conf));
1422
1423                                 if (diag == 0)
1424                                         continue;
1425
1426                                 /* Fail to setup tx queue, return */
1427                                 if (rte_atomic16_cmpset(&(port->port_status),
1428                                                         RTE_PORT_HANDLING,
1429                                                         RTE_PORT_STOPPED) == 0)
1430                                         printf("Port %d cannot be set back "
1431                                                         "to stopped\n", pi);
1432                                 printf("Failed to configure port %d tx queues\n", pi);
1433                                 /* try to reconfigure queues next time */
1434                                 port->need_reconfig_queues = 1;
1435                                 return -1;
1436                         }
1437                         /* setup rx queues */
1438                         for (qi = 0; qi < nb_rxq; qi++) {
1439                                 if ((numa_support) &&
1440                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1441                                         struct rte_mempool * mp =
1442                                                 mbuf_pool_find(rxring_numa[pi]);
1443                                         if (mp == NULL) {
1444                                                 printf("Failed to setup RX queue:"
1445                                                         " No mempool allocation"
1446                                                         " on socket %d\n",
1447                                                         rxring_numa[pi]);
1448                                                 return -1;
1449                                         }
1450
1451                                         diag = rte_eth_rx_queue_setup(pi, qi,
1452                                              nb_rxd, rxring_numa[pi],
1453                                              &(port->rx_conf), mp);
1454                                 } else {
1455                                         struct rte_mempool *mp =
1456                                                 mbuf_pool_find(port->socket_id);
1457                                         if (mp == NULL) {
1458                                                 printf("Failed to setup RX queue:"
1459                                                         " No mempool allocation"
1460                                                         " on socket %d\n",
1461                                                         port->socket_id);
1462                                                 return -1;
1463                                         }
1464                                         diag = rte_eth_rx_queue_setup(pi, qi,
1465                                              nb_rxd, port->socket_id,
1466                                              &(port->rx_conf), mp);
1467                                 }
1468                                 if (diag == 0)
1469                                         continue;
1470
1471                                 /* Failed to setup rx queue, return */
1472                                 if (rte_atomic16_cmpset(&(port->port_status),
1473                                                         RTE_PORT_HANDLING,
1474                                                         RTE_PORT_STOPPED) == 0)
1475                                         printf("Port %d cannot be set back "
1476                                                         "to stopped\n", pi);
1477                                 printf("Failed to configure port %d rx queues\n", pi);
1478                                 /* try to reconfigure queues next time */
1479                                 port->need_reconfig_queues = 1;
1480                                 return -1;
1481                         }
1482                 }
1483
1484                 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1485                      event_type < RTE_ETH_EVENT_MAX;
1486                      event_type++) {
1487                         diag = rte_eth_dev_callback_register(pi,
1488                                                         event_type,
1489                                                         eth_event_callback,
1490                                                         NULL);
1491                         if (diag) {
1492                                 printf("Failed to setup event callback for event %d\n",
1493                                         event_type);
1494                                 return -1;
1495                         }
1496                 }
1497
1498                 /* start port */
1499                 if (rte_eth_dev_start(pi) < 0) {
1500                         printf("Failed to start port %d\n", pi);
1501
1502                         /* Failed to start port, set status back to stopped */
1503                         if (rte_atomic16_cmpset(&(port->port_status),
1504                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1505                                 printf("Port %d cannot be set back to "
1506                                                         "stopped\n", pi);
1507                         continue;
1508                 }
1509
1510                 if (rte_atomic16_cmpset(&(port->port_status),
1511                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1512                         printf("Port %d cannot be set to started\n", pi);
1513
1514                 rte_eth_macaddr_get(pi, &mac_addr);
1515                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1516                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1517                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1518                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1519
1520                 /* at least one port started, need to check link status */
1521                 need_check_link_status = 1;
1522         }
1523
1524         if (need_check_link_status == 1 && !no_link_check)
1525                 check_all_ports_link_status(RTE_PORT_ALL);
1526         else if (need_check_link_status == 0)
1527                 printf("Please stop the ports first\n");
1528
1529         printf("Done\n");
1530         return 0;
1531 }
1532
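     /*
      * Stop the port "pid", or every probed port when pid is
      * (portid_t)RTE_PORT_ALL. Ports that are still forwarding or that act
      * as bonding slaves are skipped with a message; the others are moved
      * from RTE_PORT_STARTED to RTE_PORT_STOPPED through rte_eth_dev_stop().
      */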
1533 void
1534 stop_port(portid_t pid)
1535 {
1536         portid_t pi;
1537         struct rte_port *port;
1538         int need_check_link_status = 0;
1539
1540         if (dcb_test) {
1541                 dcb_test = 0;
1542                 dcb_config = 0;
1543         }
1544
1545         if (port_id_is_invalid(pid, ENABLED_WARN))
1546                 return;
1547
1548         printf("Stopping ports...\n");
1549
1550         RTE_ETH_FOREACH_DEV(pi) {
1551                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1552                         continue;
1553
1554                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1555                         printf("Please remove port %d from forwarding configuration.\n", pi);
1556                         continue;
1557                 }
1558
1559                 if (port_is_bonding_slave(pi)) {
1560                         printf("Please remove port %d from bonded device.\n", pi);
1561                         continue;
1562                 }
1563
1564                 port = &ports[pi];
1565                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1566                                                 RTE_PORT_HANDLING) == 0)
1567                         continue;
1568
1569                 rte_eth_dev_stop(pi);
1570
1571                 if (rte_atomic16_cmpset(&(port->port_status),
1572                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1573                         printf("Port %d cannot be set to stopped\n", pi);
1574                 need_check_link_status = 1;
1575         }
1576         if (need_check_link_status && !no_link_check)
1577                 check_all_ports_link_status(RTE_PORT_ALL);
1578
1579         printf("Done\n");
1580 }
1581
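     /*
      * Close the port "pid", or every probed port when pid is
      * (portid_t)RTE_PORT_ALL. A port must be stopped first; any flow rules
      * still attached to it are flushed before rte_eth_dev_close() is called.
      */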
1582 void
1583 close_port(portid_t pid)
1584 {
1585         portid_t pi;
1586         struct rte_port *port;
1587
1588         if (port_id_is_invalid(pid, ENABLED_WARN))
1589                 return;
1590
1591         printf("Closing ports...\n");
1592
1593         RTE_ETH_FOREACH_DEV(pi) {
1594                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1595                         continue;
1596
1597                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1598                         printf("Please remove port %d from forwarding configuration.\n", pi);
1599                         continue;
1600                 }
1601
1602                 if (port_is_bonding_slave(pi)) {
1603                         printf("Please remove port %d from bonded device.\n", pi);
1604                         continue;
1605                 }
1606
1607                 port = &ports[pi];
1608                 if (rte_atomic16_cmpset(&(port->port_status),
1609                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1610                         printf("Port %d is already closed\n", pi);
1611                         continue;
1612                 }
1613
1614                 if (rte_atomic16_cmpset(&(port->port_status),
1615                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1616                         printf("Port %d is not stopped, skipping\n", pi);
1617                         continue;
1618                 }
1619
1620                 if (port->flow_list)
1621                         port_flow_flush(pi);
1622                 rte_eth_dev_close(pi);
1623
1624                 if (rte_atomic16_cmpset(&(port->port_status),
1625                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1626                         printf("Port %d cannot be set to closed\n", pi);
1627         }
1628
1629         printf("Done\n");
1630 }
1631
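     /*
      * Hot-plug a new port described by "identifier" (a device argument
      * string as accepted by rte_eth_dev_attach()), then reconfigure it,
      * enable promiscuous mode and leave it in the RTE_PORT_STOPPED state.
      */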
1632 void
1633 attach_port(char *identifier)
1634 {
1635         portid_t pi = 0;
1636         unsigned int socket_id;
1637
1638         printf("Attaching a new port...\n");
1639
1640         if (identifier == NULL) {
1641                 printf("Invalid parameter specified\n");
1642                 return;
1643         }
1644
1645         if (rte_eth_dev_attach(identifier, &pi))
1646                 return;
1647
1648         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1649         /* if socket_id is invalid, set to 0 */
1650         if (check_socket_id(socket_id) < 0)
1651                 socket_id = 0;
1652         reconfig(pi, socket_id);
1653         rte_eth_promiscuous_enable(pi);
1654
1655         nb_ports = rte_eth_dev_count();
1656
1657         ports[pi].port_status = RTE_PORT_STOPPED;
1658
1659         printf("Port %d is attached. Now the total number of ports is %d\n", pi, nb_ports);
1660         printf("Done\n");
1661 }
1662
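     /*
      * Hot-unplug the given port. The port must already be closed
      * (see port_is_closed()); remaining flow rules are flushed before
      * rte_eth_dev_detach() removes the underlying device.
      */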
1663 void
1664 detach_port(uint8_t port_id)
1665 {
1666         char name[RTE_ETH_NAME_MAX_LEN];
1667
1668         printf("Detaching a port...\n");
1669
1670         if (!port_is_closed(port_id)) {
1671                 printf("Please close port first\n");
1672                 return;
1673         }
1674
1675         if (ports[port_id].flow_list)
1676                 port_flow_flush(port_id);
1677
1678         if (rte_eth_dev_detach(port_id, name))
1679                 return;
1680
1681         nb_ports = rte_eth_dev_count();
1682
1683         printf("Port '%s' is detached. Now the total number of ports is %d\n",
1684                         name, nb_ports);
1685         printf("Done\n");
1686         return;
1687 }
1688
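     /*
      * Called on exit: stop packet forwarding if it is still running, then
      * stop and close every port without waiting for link status checks.
      */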
1689 void
1690 pmd_test_exit(void)
1691 {
1692         portid_t pt_id;
1693
1694         if (test_done == 0)
1695                 stop_packet_forwarding();
1696
1697         if (ports != NULL) {
1698                 no_link_check = 1;
1699                 RTE_ETH_FOREACH_DEV(pt_id) {
1700                         printf("\nShutting down port %d...\n", pt_id);
1701                         fflush(stdout);
1702                         stop_port(pt_id);
1703                         close_port(pt_id);
1704                 }
1705         }
1706         printf("\nBye...\n");
1707 }
1708
1709 typedef void (*cmd_func_t)(void);
1710 struct pmd_test_command {
1711         const char *cmd_name;
1712         cmd_func_t cmd_func;
1713 };
1714
1715 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1716
1717 /* Check the link status of all ports for up to 9 seconds, then print the final status */
1718 static void
1719 check_all_ports_link_status(uint32_t port_mask)
1720 {
1721 #define CHECK_INTERVAL 100 /* 100ms */
1722 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1723         uint8_t portid, count, all_ports_up, print_flag = 0;
1724         struct rte_eth_link link;
1725
1726         printf("Checking link statuses...\n");
1727         fflush(stdout);
1728         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1729                 all_ports_up = 1;
1730                 RTE_ETH_FOREACH_DEV(portid) {
1731                         if ((port_mask & (1 << portid)) == 0)
1732                                 continue;
1733                         memset(&link, 0, sizeof(link));
1734                         rte_eth_link_get_nowait(portid, &link);
1735                         /* print link status if flag set */
1736                         if (print_flag == 1) {
1737                                 if (link.link_status)
1738                                         printf("Port %d Link Up - speed %u "
1739                                                 "Mbps - %s\n", (uint8_t)portid,
1740                                                 (unsigned)link.link_speed,
1741                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1742                                         ("full-duplex") : ("half-duplex"));
1743                                 else
1744                                         printf("Port %d Link Down\n",
1745                                                 (uint8_t)portid);
1746                                 continue;
1747                         }
1748                         /* clear all_ports_up flag if any link down */
1749                         if (link.link_status == ETH_LINK_DOWN) {
1750                                 all_ports_up = 0;
1751                                 break;
1752                         }
1753                 }
1754                 /* after finally printing all link status, get out */
1755                 if (print_flag == 1)
1756                         break;
1757
1758                 if (all_ports_up == 0) {
1759                         fflush(stdout);
1760                         rte_delay_ms(CHECK_INTERVAL);
1761                 }
1762
1763                 /* set the print_flag if all ports up or timeout */
1764                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1765                         print_flag = 1;
1766                 }
1767
1768                 if (lsc_interrupt)
1769                         break;
1770         }
1771 }
1772
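     /*
      * Deferred handler for RTE_ETH_EVENT_INTR_RMV: stop and close the
      * removed port, then detach the underlying virtual or PCI device
      * from the EAL.
      */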
1773 static void
1774 rmv_event_callback(void *arg)
1775 {
1776         struct rte_eth_dev *dev;
1777         struct rte_devargs *da;
1778         char name[32] = "";
1779         uint8_t port_id = (intptr_t)arg;
1780
1781         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1782         dev = &rte_eth_devices[port_id];
1783         da = dev->device->devargs;
1784
1785         stop_port(port_id);
1786         close_port(port_id);
1787         if (da->type == RTE_DEVTYPE_VIRTUAL)
1788                 snprintf(name, sizeof(name), "%s", da->virt.drv_name);
1789         else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
1790                 rte_pci_device_name(&da->pci.addr, name, sizeof(name));
1791         printf("removing device %s\n", name);
1792         rte_eal_dev_detach(name);
1793         dev->state = RTE_ETH_DEV_UNUSED;
1794 }
1795
1796 /* This function is used by the interrupt thread */
1797 static void
1798 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
1799 {
1800         static const char * const event_desc[] = {
1801                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1802                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1803                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1804                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1805                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1806                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1807                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1808                 [RTE_ETH_EVENT_MAX] = NULL,
1809         };
1810
1811         RTE_SET_USED(param);
1812
1813         if (type >= RTE_ETH_EVENT_MAX) {
1814                 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1815                         port_id, __func__, type);
1816                 fflush(stderr);
1817         } else {
1818                 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1819                         event_desc[type]);
1820                 fflush(stdout);
1821         }
1822
1823         switch (type) {
1824         case RTE_ETH_EVENT_INTR_RMV:
1825                 if (rte_eal_alarm_set(100000,
1826                                 rmv_event_callback, (void *)(intptr_t)port_id))
1827                         fprintf(stderr, "Could not set up deferred device removal\n");
1828                 break;
1829         default:
1830                 break;
1831         }
1832 }
1833
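     /*
      * Apply the user-supplied TX queue <-> statistics counter mappings of
      * "port_id" to the hardware. Returns 0 on success, or the error code
      * reported by rte_eth_dev_set_tx_queue_stats_mapping().
      */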
1834 static int
1835 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1836 {
1837         uint16_t i;
1838         int diag;
1839         uint8_t mapping_found = 0;
1840
1841         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1842                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1843                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1844                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1845                                         tx_queue_stats_mappings[i].queue_id,
1846                                         tx_queue_stats_mappings[i].stats_counter_id);
1847                         if (diag != 0)
1848                                 return diag;
1849                         mapping_found = 1;
1850                 }
1851         }
1852         if (mapping_found)
1853                 port->tx_queue_stats_mapping_enabled = 1;
1854         return 0;
1855 }
1856
1857 static int
1858 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1859 {
1860         uint16_t i;
1861         int diag;
1862         uint8_t mapping_found = 0;
1863
1864         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1865                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1866                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1867                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1868                                         rx_queue_stats_mappings[i].queue_id,
1869                                         rx_queue_stats_mappings[i].stats_counter_id);
1870                         if (diag != 0)
1871                                 return diag;
1872                         mapping_found = 1;
1873                 }
1874         }
1875         if (mapping_found)
1876                 port->rx_queue_stats_mapping_enabled = 1;
1877         return 0;
1878 }
1879
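     /*
      * Program both the TX and RX queue statistics mappings of port "pi".
      * -ENOTSUP simply disables the feature for that port; any other error
      * is fatal and terminates testpmd through rte_exit().
      */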
1880 static void
1881 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1882 {
1883         int diag = 0;
1884
1885         diag = set_tx_queue_stats_mapping_registers(pi, port);
1886         if (diag != 0) {
1887                 if (diag == -ENOTSUP) {
1888                         port->tx_queue_stats_mapping_enabled = 0;
1889                         printf("TX queue stats mapping not supported for port id=%d\n", pi);
1890                 }
1891                 else
1892                         rte_exit(EXIT_FAILURE,
1893                                         "set_tx_queue_stats_mapping_registers "
1894                                         "failed for port id=%d diag=%d\n",
1895                                         pi, diag);
1896         }
1897
1898         diag = set_rx_queue_stats_mapping_registers(pi, port);
1899         if (diag != 0) {
1900                 if (diag == -ENOTSUP) {
1901                         port->rx_queue_stats_mapping_enabled = 0;
1902                         printf("RX queue stats mapping not supported for port id=%d\n", pi);
1903                 }
1904                 else
1905                         rte_exit(EXIT_FAILURE,
1906                                         "set_rx_queue_stats_mapping_registers "
1907                                         "failed for port id=%d diag=%d\n",
1908                                         pi, diag);
1909         }
1910 }
1911
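     /*
      * Start from the PMD default RX/TX configuration reported in dev_info
      * and override only the thresholds and flags that were explicitly set
      * on the command line (RTE_PMD_PARAM_UNSET marks untouched parameters).
      */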
1912 static void
1913 rxtx_port_config(struct rte_port *port)
1914 {
1915         port->rx_conf = port->dev_info.default_rxconf;
1916         port->tx_conf = port->dev_info.default_txconf;
1917
1918         /* Check if any RX/TX parameters have been passed */
1919         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1920                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1921
1922         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1923                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1924
1925         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1926                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1927
1928         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1929                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1930
1931         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1932                 port->rx_conf.rx_drop_en = rx_drop_en;
1933
1934         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1935                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1936
1937         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1938                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1939
1940         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1941                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1942
1943         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1944                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1945
1946         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1947                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1948
1949         if (txq_flags != RTE_PMD_PARAM_UNSET)
1950                 port->tx_conf.txq_flags = txq_flags;
1951 }
1952
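     /*
      * Set the default configuration of every probed port: RX mode, flow
      * director, RSS (only when several RX queues are used), per-port RX/TX
      * tuning and, when requested and supported, LSC/RMV interrupts.
      */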
1953 void
1954 init_port_config(void)
1955 {
1956         portid_t pid;
1957         struct rte_port *port;
1958
1959         RTE_ETH_FOREACH_DEV(pid) {
1960                 port = &ports[pid];
1961                 port->dev_conf.rxmode = rx_mode;
1962                 port->dev_conf.fdir_conf = fdir_conf;
1963                 if (nb_rxq > 1) {
1964                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1965                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1966                 } else {
1967                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1968                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1969                 }
1970
1971                 if (port->dcb_flag == 0) {
1972                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1973                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1974                         else
1975                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1976                 }
1977
1978                 rxtx_port_config(port);
1979
1980                 rte_eth_macaddr_get(pid, &port->eth_addr);
1981
1982                 map_port_queue_stats_mapping_registers(pid, port);
1983 #ifdef RTE_NIC_BYPASS
1984                 rte_eth_dev_bypass_init(pid);
1985 #endif
1986
1987                 if (lsc_interrupt &&
1988                     (rte_eth_devices[pid].data->dev_flags &
1989                      RTE_ETH_DEV_INTR_LSC))
1990                         port->dev_conf.intr_conf.lsc = 1;
1991                 if (rmv_interrupt &&
1992                     (rte_eth_devices[pid].data->dev_flags &
1993                      RTE_ETH_DEV_INTR_RMV))
1994                         port->dev_conf.intr_conf.rmv = 1;
1995         }
1996 }
1997
1998 void set_port_slave_flag(portid_t slave_pid)
1999 {
2000         struct rte_port *port;
2001
2002         port = &ports[slave_pid];
2003         port->slave_flag = 1;
2004 }
2005
2006 void clear_port_slave_flag(portid_t slave_pid)
2007 {
2008         struct rte_port *port;
2009
2010         port = &ports[slave_pid];
2011         port->slave_flag = 0;
2012 }
2013
2014 uint8_t port_is_bonding_slave(portid_t slave_pid)
2015 {
2016         struct rte_port *port;
2017
2018         port = &ports[slave_pid];
2019         return port->slave_flag;
2020 }
2021
2022 const uint16_t vlan_tags[] = {
2023                 0,  1,  2,  3,  4,  5,  6,  7,
2024                 8,  9, 10, 11,  12, 13, 14, 15,
2025                 16, 17, 18, 19, 20, 21, 22, 23,
2026                 24, 25, 26, 27, 28, 29, 30, 31
2027 };
2028
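     /*
      * Fill "eth_conf" for a DCB test. In DCB_VT_ENABLED mode the VMDQ+DCB
      * pools are mapped to the vlan_tags[] array above; otherwise a plain
      * DCB(+RSS) configuration with "num_tcs" traffic classes is built.
      * "pfc_en" additionally advertises priority flow control support.
      */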
2029 static int
2030 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2031                  enum dcb_mode_enable dcb_mode,
2032                  enum rte_eth_nb_tcs num_tcs,
2033                  uint8_t pfc_en)
2034 {
2035         uint8_t i;
2036
2037         /*
2038          * Builds up the correct configuration for dcb+vt based on the vlan tags array
2039          * given above, and the number of traffic classes available for use.
2040          */
2041         if (dcb_mode == DCB_VT_ENABLED) {
2042                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2043                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2044                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2045                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2046
2047                 /* VMDQ+DCB RX and TX configurations */
2048                 vmdq_rx_conf->enable_default_pool = 0;
2049                 vmdq_rx_conf->default_pool = 0;
2050                 vmdq_rx_conf->nb_queue_pools =
2051                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2052                 vmdq_tx_conf->nb_queue_pools =
2053                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2054
2055                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2056                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2057                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2058                         vmdq_rx_conf->pool_map[i].pools =
2059                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
2060                 }
2061                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2062                         vmdq_rx_conf->dcb_tc[i] = i;
2063                         vmdq_tx_conf->dcb_tc[i] = i;
2064                 }
2065
2066                 /* set DCB mode of RX and TX of multiple queues */
2067                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2068                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2069         } else {
2070                 struct rte_eth_dcb_rx_conf *rx_conf =
2071                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
2072                 struct rte_eth_dcb_tx_conf *tx_conf =
2073                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
2074
2075                 rx_conf->nb_tcs = num_tcs;
2076                 tx_conf->nb_tcs = num_tcs;
2077
2078                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2079                         rx_conf->dcb_tc[i] = i % num_tcs;
2080                         tx_conf->dcb_tc[i] = i % num_tcs;
2081                 }
2082                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2083                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2084                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2085         }
2086
2087         if (pfc_en)
2088                 eth_conf->dcb_capability_en =
2089                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2090         else
2091                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2092
2093         return 0;
2094 }
2095
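     /*
      * Switch port "pid" to DCB mode: build the DCB configuration, write it
      * to the device with zero queues, resize nb_rxq/nb_txq to match the DCB
      * capabilities and enable VLAN filtering for every entry of vlan_tags[].
      * Returns 0 on success, a negative value otherwise.
      */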
2096 int
2097 init_port_dcb_config(portid_t pid,
2098                      enum dcb_mode_enable dcb_mode,
2099                      enum rte_eth_nb_tcs num_tcs,
2100                      uint8_t pfc_en)
2101 {
2102         struct rte_eth_conf port_conf;
2103         struct rte_port *rte_port;
2104         int retval;
2105         uint16_t i;
2106
2107         rte_port = &ports[pid];
2108
2109         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2110         /* Enter DCB configuration status */
2111         dcb_config = 1;
2112
2113         /* set configuration of DCB in VT mode and DCB in non-VT mode */
2114         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2115         if (retval < 0)
2116                 return retval;
2117         port_conf.rxmode.hw_vlan_filter = 1;
2118
2119         /**
2120          * Write the configuration into the device.
2121          * Set the numbers of RX & TX queues to 0, so
2122          * the RX & TX queues will not be set up.
2123          */
2124         (void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2125
2126         rte_eth_dev_info_get(pid, &rte_port->dev_info);
2127
2128         /* If dev_info.vmdq_pool_base is greater than 0,
2129          * the queue ids of the vmdq pools start after the pf queues.
2130          */
2131         if (dcb_mode == DCB_VT_ENABLED &&
2132             rte_port->dev_info.vmdq_pool_base > 0) {
2133                 printf("VMDQ_DCB multi-queue mode is nonsensical"
2134                         " for port %d.\n", pid);
2135                 return -1;
2136         }
2137
2138         /* Assume the ports in testpmd have the same dcb capability
2139          * and have the same number of rxq and txq in dcb mode
2140          */
2141         if (dcb_mode == DCB_VT_ENABLED) {
2142                 if (rte_port->dev_info.max_vfs > 0) {
2143                         nb_rxq = rte_port->dev_info.nb_rx_queues;
2144                         nb_txq = rte_port->dev_info.nb_tx_queues;
2145                 } else {
2146                         nb_rxq = rte_port->dev_info.max_rx_queues;
2147                         nb_txq = rte_port->dev_info.max_tx_queues;
2148                 }
2149         } else {
2150                 /* if VT is disabled, use all PF queues */
2151                 if (rte_port->dev_info.vmdq_pool_base == 0) {
2152                         nb_rxq = rte_port->dev_info.max_rx_queues;
2153                         nb_txq = rte_port->dev_info.max_tx_queues;
2154                 } else {
2155                         nb_rxq = (queueid_t)num_tcs;
2156                         nb_txq = (queueid_t)num_tcs;
2157
2158                 }
2159         }
2160         rx_free_thresh = 64;
2161
2162         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2163
2164         rxtx_port_config(rte_port);
2165         /* VLAN filter */
2166         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2167         for (i = 0; i < RTE_DIM(vlan_tags); i++)
2168                 rx_vft_set(pid, vlan_tags[i], 1);
2169
2170         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2171         map_port_queue_stats_mapping_registers(pid, rte_port);
2172
2173         rte_port->dcb_flag = 1;
2174
2175         return 0;
2176 }
2177
2178 static void
2179 init_port(void)
2180 {
2181         /* Configuration of Ethernet ports. */
2182         ports = rte_zmalloc("testpmd: ports",
2183                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2184                             RTE_CACHE_LINE_SIZE);
2185         if (ports == NULL) {
2186                 rte_exit(EXIT_FAILURE,
2187                                 "rte_zmalloc(%d struct rte_port) failed\n",
2188                                 RTE_MAX_ETHPORTS);
2189         }
2190 }
2191
2192 static void
2193 force_quit(void)
2194 {
2195         pmd_test_exit();
2196         prompt_exit();
2197 }
2198
2199 static void
2200 signal_handler(int signum)
2201 {
2202         if (signum == SIGINT || signum == SIGTERM) {
2203                 printf("\nSignal %d received, preparing to exit...\n",
2204                                 signum);
2205 #ifdef RTE_LIBRTE_PDUMP
2206                 /* uninitialize packet capture framework */
2207                 rte_pdump_uninit();
2208 #endif
2209 #ifdef RTE_LIBRTE_LATENCY_STATS
2210                 rte_latencystats_uninit();
2211 #endif
2212                 force_quit();
2213                 /* exit with the expected status */
2214                 signal(signum, SIG_DFL);
2215                 kill(getpid(), signum);
2216         }
2217 }
2218
2219 int
2220 main(int argc, char** argv)
2221 {
2222         int  diag;
2223         uint8_t port_id;
2224
2225         signal(SIGINT, signal_handler);
2226         signal(SIGTERM, signal_handler);
2227
2228         diag = rte_eal_init(argc, argv);
2229         if (diag < 0)
2230                 rte_panic("Cannot init EAL\n");
2231
2232 #ifdef RTE_LIBRTE_PDUMP
2233         /* initialize packet capture framework */
2234         rte_pdump_init(NULL);
2235 #endif
2236
2237         nb_ports = (portid_t) rte_eth_dev_count();
2238         if (nb_ports == 0)
2239                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2240
2241         /* allocate port structures, and init them */
2242         init_port();
2243
2244         set_def_fwd_config();
2245         if (nb_lcores == 0)
2246                 rte_panic("Empty set of forwarding logical cores - check the "
2247                           "core mask supplied in the command parameters\n");
2248
2249         /* Bitrate/latency stats disabled by default */
2250 #ifdef RTE_LIBRTE_BITRATE
2251         bitrate_enabled = 0;
2252 #endif
2253 #ifdef RTE_LIBRTE_LATENCY_STATS
2254         latencystats_enabled = 0;
2255 #endif
2256
2257         argc -= diag;
2258         argv += diag;
2259         if (argc > 1)
2260                 launch_args_parse(argc, argv);
2261
2262         if (!nb_rxq && !nb_txq)
2263                 printf("Warning: Either rx or tx queues should be non-zero\n");
2264
2265         if (nb_rxq > 1 && nb_rxq > nb_txq)
2266                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2267                        "but nb_txq=%d will prevent it from being fully tested.\n",
2268                        nb_rxq, nb_txq);
2269
2270         init_config();
2271         if (start_port(RTE_PORT_ALL) != 0)
2272                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2273
2274         /* set all ports to promiscuous mode by default */
2275         RTE_ETH_FOREACH_DEV(port_id)
2276                 rte_eth_promiscuous_enable(port_id);
2277
2278         /* Init metrics library */
2279         rte_metrics_init(rte_socket_id());
2280
2281 #ifdef RTE_LIBRTE_LATENCY_STATS
2282         if (latencystats_enabled != 0) {
2283                 int ret = rte_latencystats_init(1, NULL);
2284                 if (ret)
2285                         printf("Warning: latencystats init()"
2286                                 " returned error %d\n", ret);
2287                 printf("Latencystats running on lcore %d\n",
2288                         latencystats_lcore_id);
2289         }
2290 #endif
2291
2292         /* Setup bitrate stats */
2293 #ifdef RTE_LIBRTE_BITRATE
2294         if (bitrate_enabled != 0) {
2295                 bitrate_data = rte_stats_bitrate_create();
2296                 if (bitrate_data == NULL)
2297                         rte_exit(EXIT_FAILURE,
2298                                 "Could not allocate bitrate data.\n");
2299                 rte_stats_bitrate_reg(bitrate_data);
2300         }
2301 #endif
2302
2303 #ifdef RTE_LIBRTE_CMDLINE
2304         if (strlen(cmdline_filename) != 0)
2305                 cmdline_read_from_file(cmdline_filename);
2306
2307         if (interactive == 1) {
2308                 if (auto_start) {
2309                         printf("Start automatic packet forwarding\n");
2310                         start_packet_forwarding(0);
2311                 }
2312                 prompt();
2313                 pmd_test_exit();
2314         } else
2315 #endif
2316         {
2317                 char c;
2318                 int rc;
2319
2320                 printf("No command-line core given, starting packet forwarding\n");
2321                 start_packet_forwarding(0);
2322                 printf("Press enter to exit\n");
2323                 rc = read(0, &c, 1);
2324                 pmd_test_exit();
2325                 if (rc < 0)
2326                         return 1;
2327         }
2328
2329         return 0;
2330 }