f1325ce9aeeebb7037aab48d32d90b32c1b7bd07
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
68 #include <rte_mbuf.h>
69 #include <rte_interrupts.h>
70 #include <rte_pci.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
73 #include <rte_dev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78 #ifdef RTE_LIBRTE_PDUMP
79 #include <rte_pdump.h>
80 #endif
81 #include <rte_flow.h>
82 #include <rte_metrics.h>
83 #ifdef RTE_LIBRTE_BITRATE
84 #include <rte_bitrate.h>
85 #endif
86 #include <rte_metrics.h>
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
89 #endif
90
91 #include "testpmd.h"
92
93 uint16_t verbose_level = 0; /**< Silent by default. */
94
95 /* use master core for command line ? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
98
99 /*
100  * NUMA support configuration.
101  * When set, the NUMA support attempts to dispatch the allocation of the
102  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
103  * probed ports among the CPU sockets 0 and 1.
104  * Otherwise, all memory is allocated from CPU socket 0.
105  */
106 uint8_t numa_support = 0; /**< No numa support by default */
107
108 /*
109  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
110  * not configured.
111  */
112 uint8_t socket_num = UMA_NO_CONFIG;
113
114 /*
115  * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
116  */
117 uint8_t mp_anon = 0;
118
119 /*
120  * Record the Ethernet address of peer target ports to which packets are
121  * forwarded.
122  * Must be instantiated with the ethernet addresses of peer traffic generator
123  * ports.
124  */
125 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
126 portid_t nb_peer_eth_addrs = 0;
127
128 /*
129  * Probed Target Environment.
130  */
131 struct rte_port *ports;        /**< For all probed ethernet ports. */
132 portid_t nb_ports;             /**< Number of probed ethernet ports. */
133 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
134 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
135
136 /*
137  * Test Forwarding Configuration.
138  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
139  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
140  */
141 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
142 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
143 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
144 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
145
146 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
147 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
148
149 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
150 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
151
152 /*
153  * Forwarding engines.
154  */
155 struct fwd_engine * fwd_engines[] = {
156         &io_fwd_engine,
157         &mac_fwd_engine,
158         &mac_swap_engine,
159         &flow_gen_engine,
160         &rx_only_engine,
161         &tx_only_engine,
162         &csum_fwd_engine,
163         &icmp_echo_engine,
164 #ifdef RTE_LIBRTE_IEEE1588
165         &ieee1588_fwd_engine,
166 #endif
167         NULL,
168 };
169
170 struct fwd_config cur_fwd_config;
171 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
172 uint32_t retry_enabled;
173 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
174 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
175
176 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
177 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
178                                       * specified on command-line. */
179
180 /*
181  * Configuration of packet segments used by the "txonly" processing engine.
182  */
183 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
184 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
185         TXONLY_DEF_PACKET_LEN,
186 };
187 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
188
189 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
190 /**< Split policy for packets to TX. */
191
192 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
193 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
194
195 /* current configuration is in DCB or not,0 means it is not in DCB mode */
196 uint8_t dcb_config = 0;
197
198 /* Whether the dcb is in testing status */
199 uint8_t dcb_test = 0;
200
201 /*
202  * Configurable number of RX/TX queues.
203  */
204 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
205 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
206
207 /*
208  * Configurable number of RX/TX ring descriptors.
209  */
210 #define RTE_TEST_RX_DESC_DEFAULT 128
211 #define RTE_TEST_TX_DESC_DEFAULT 512
212 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
213 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
214
215 #define RTE_PMD_PARAM_UNSET -1
216 /*
217  * Configurable values of RX and TX ring threshold registers.
218  */
219
220 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
221 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
222 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
223
224 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
225 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
226 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
227
228 /*
229  * Configurable value of RX free threshold.
230  */
231 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
232
233 /*
234  * Configurable value of RX drop enable.
235  */
236 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
237
238 /*
239  * Configurable value of TX free threshold.
240  */
241 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
242
243 /*
244  * Configurable value of TX RS bit threshold.
245  */
246 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
247
248 /*
249  * Configurable value of TX queue flags.
250  */
251 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
252
253 /*
254  * Receive Side Scaling (RSS) configuration.
255  */
256 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
257
258 /*
259  * Port topology configuration
260  */
261 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
262
263 /*
264  * Avoids to flush all the RX streams before starts forwarding.
265  */
266 uint8_t no_flush_rx = 0; /* flush by default */
267
268 /*
269  * Avoids to check link status when starting/stopping a port.
270  */
271 uint8_t no_link_check = 0; /* check by default */
272
273 /*
274  * Enable link status change notification
275  */
276 uint8_t lsc_interrupt = 1; /* enabled by default */
277
278 /*
279  * NIC bypass mode configuration options.
280  */
281 #ifdef RTE_NIC_BYPASS
282
283 /* The NIC bypass watchdog timeout. */
284 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
285
286 #endif
287
288 #ifdef RTE_LIBRTE_LATENCY_STATS
289
290 /*
291  * Set when latency stats is enabled in the commandline
292  */
293 uint8_t latencystats_enabled;
294
295 /*
296  * Lcore ID to service latency statistics.
297  */
298 lcoreid_t latencystats_lcore_id = -1;
299
300 #endif
301
302 /*
303  * Ethernet device configuration.
304  */
305 struct rte_eth_rxmode rx_mode = {
306         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
307         .split_hdr_size = 0,
308         .header_split   = 0, /**< Header Split disabled. */
309         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
310         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
311         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
312         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
313         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
314         .hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
315 };
316
317 struct rte_fdir_conf fdir_conf = {
318         .mode = RTE_FDIR_MODE_NONE,
319         .pballoc = RTE_FDIR_PBALLOC_64K,
320         .status = RTE_FDIR_REPORT_STATUS,
321         .mask = {
322                 .vlan_tci_mask = 0x0,
323                 .ipv4_mask     = {
324                         .src_ip = 0xFFFFFFFF,
325                         .dst_ip = 0xFFFFFFFF,
326                 },
327                 .ipv6_mask     = {
328                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
329                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
330                 },
331                 .src_port_mask = 0xFFFF,
332                 .dst_port_mask = 0xFFFF,
333                 .mac_addr_byte_mask = 0xFF,
334                 .tunnel_type_mask = 1,
335                 .tunnel_id_mask = 0xFFFFFFFF,
336         },
337         .drop_queue = 127,
338 };
339
340 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
341
342 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
343 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
344
345 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
346 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
347
348 uint16_t nb_tx_queue_stats_mappings = 0;
349 uint16_t nb_rx_queue_stats_mappings = 0;
350
351 unsigned max_socket = 0;
352
353 /* Bitrate statistics */
354 struct rte_stats_bitrates *bitrate_data;
355
356 /* Forward function declarations */
357 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
358 static void check_all_ports_link_status(uint32_t port_mask);
359 static void eth_event_callback(uint8_t port_id,
360                                enum rte_eth_event_type type,
361                                void *param);
362
363 /*
364  * Check if all the ports are started.
365  * If yes, return positive value. If not, return zero.
366  */
367 static int all_ports_started(void);
368
369 /*
370  * Setup default configuration.
371  */
372 static void
373 set_default_fwd_lcores_config(void)
374 {
375         unsigned int i;
376         unsigned int nb_lc;
377         unsigned int sock_num;
378
379         nb_lc = 0;
380         for (i = 0; i < RTE_MAX_LCORE; i++) {
381                 sock_num = rte_lcore_to_socket_id(i) + 1;
382                 if (sock_num > max_socket) {
383                         if (sock_num > RTE_MAX_NUMA_NODES)
384                                 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
385                         max_socket = sock_num;
386                 }
387                 if (!rte_lcore_is_enabled(i))
388                         continue;
389                 if (i == rte_get_master_lcore())
390                         continue;
391                 fwd_lcores_cpuids[nb_lc++] = i;
392         }
393         nb_lcores = (lcoreid_t) nb_lc;
394         nb_cfg_lcores = nb_lcores;
395         nb_fwd_lcores = 1;
396 }
397
398 static void
399 set_def_peer_eth_addrs(void)
400 {
401         portid_t i;
402
403         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
404                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
405                 peer_eth_addrs[i].addr_bytes[5] = i;
406         }
407 }
408
409 static void
410 set_default_fwd_ports_config(void)
411 {
412         portid_t pt_id;
413
414         for (pt_id = 0; pt_id < nb_ports; pt_id++)
415                 fwd_ports_ids[pt_id] = pt_id;
416
417         nb_cfg_ports = nb_ports;
418         nb_fwd_ports = nb_ports;
419 }
420
/*
 * Reset the whole forwarding configuration to its built-in defaults:
 * forwarding lcores, peer Ethernet addresses and forwarding ports,
 * in that order.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
428
429 /*
430  * Configuration initialisation done once at init time.
431  */
/*
 * Create an mbuf pool of nb_mbuf buffers, each with mbuf_seg_size bytes
 * of data room, in the memory of the given NUMA socket. The pool name
 * encodes the socket id (via mbuf_poolname_build) so one pool can exist
 * per socket. Aborts the whole application on failure; dumps the pool
 * to stdout when verbose_level > 0.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	/* Per-object size: mbuf header followed by the data room. */
	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

#ifdef RTE_LIBRTE_PMD_XENVIRT
	/* Under Xen, first try grant-table backed allocation. */
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* if the former XEN allocation failed fall back to normal allocation */
	if (rte_mp == NULL) {
		if (mp_anon != 0) {
			/* --mp-anon: back the pool with anonymous mappings,
			 * then initialize it as a pktmbuf pool by hand. */
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);
			if (rte_mp == NULL)
				goto err;

			/* rte_mempool_populate_anon() returns the number of
			 * objects added; 0 here is treated as failure. */
			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		} else {
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		}
	}

err:
	/* rte_mp may still be NULL here on any of the failure paths above. */
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
489
490 /*
491  * Check given socket id is valid or not with NUMA mode,
492  * if valid, return 0, else return -1
493  */
494 static int
495 check_socket_id(const unsigned int socket_id)
496 {
497         static int warning_once = 0;
498
499         if (socket_id >= max_socket) {
500                 if (!warning_once && numa_support)
501                         printf("Warning: NUMA should be configured manually by"
502                                " using --port-numa-config and"
503                                " --ring-numa-config parameters along with"
504                                " --numa.\n");
505                 warning_once = 1;
506                 return -1;
507         }
508         return 0;
509 }
510
/*
 * One-time configuration at init:
 *  - allocate the per-lcore forwarding contexts (fwd_lcores),
 *  - size and create the mbuf pool(s): a single pool (socket 0 or
 *    --socket-num) in UMA mode, one pool per detected socket in NUMA
 *    mode,
 *  - flag every probed port for port/queue (re)configuration,
 *  - record which mbuf pool each forwarding lcore must use,
 *  - set up the forwarding streams and the forwarding config.
 * Aborts the application on any allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* Worst-case sizing: max descriptors per direction plus
		 * per-lcore cache and one burst in flight. */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool =
				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			/* Tally how many ports sit on each socket; an
			 * explicit --port-numa-config entry wins over the
			 * device-reported socket. */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		/* With a user-given total, split it evenly across ports. */
		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;

		/* NOTE(review): port_per_socket[] is tallied above but not
		 * used when sizing the per-socket pools here — every socket
		 * gets a pool sized for RTE_MAX_ETHPORTS ports. Verify this
		 * is intended. */
		for (i = 0; i < max_socket; i++) {
			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						nb_mbuf,i);
		}
	}
	init_port_config();

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* Fall back to the socket-0 pool when the lcore's own
		 * socket has none. */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();
}
625
626
627 void
628 reconfig(portid_t new_port_id, unsigned socket_id)
629 {
630         struct rte_port *port;
631
632         /* Reconfiguration of Ethernet ports. */
633         port = &ports[new_port_id];
634         rte_eth_dev_info_get(new_port_id, &port->dev_info);
635
636         /* set flag to initialize port/queue */
637         port->need_reconfig = 1;
638         port->need_reconfig_queues = 1;
639         port->socket_id = socket_id;
640
641         init_port_config();
642 }
643
644
/*
 * (Re)build the array of forwarding streams.
 * First validates nb_rxq/nb_txq against each port's capabilities and
 * assigns a socket id to every port (NUMA aware when enabled), then
 * allocates one stream slot per (port, queue) pair, i.e.
 * nb_ports * max(nb_rxq, nb_txq) streams.
 * Returns 0 on success, -1 on invalid configuration; aborts the
 * application on allocation failure.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			/* Explicit --port-numa-config wins over the
			 * device-reported socket. */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	/* Nothing to do when the stream count is unchanged. */
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
725
726 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
727 static void
728 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
729 {
730         unsigned int total_burst;
731         unsigned int nb_burst;
732         unsigned int burst_stats[3];
733         uint16_t pktnb_stats[3];
734         uint16_t nb_pkt;
735         int burst_percent[3];
736
737         /*
738          * First compute the total number of packet bursts and the
739          * two highest numbers of bursts of the same number of packets.
740          */
741         total_burst = 0;
742         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
743         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
744         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
745                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
746                 if (nb_burst == 0)
747                         continue;
748                 total_burst += nb_burst;
749                 if (nb_burst > burst_stats[0]) {
750                         burst_stats[1] = burst_stats[0];
751                         pktnb_stats[1] = pktnb_stats[0];
752                         burst_stats[0] = nb_burst;
753                         pktnb_stats[0] = nb_pkt;
754                 }
755         }
756         if (total_burst == 0)
757                 return;
758         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
759         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
760                burst_percent[0], (int) pktnb_stats[0]);
761         if (burst_stats[0] == total_burst) {
762                 printf("]\n");
763                 return;
764         }
765         if (burst_stats[0] + burst_stats[1] == total_burst) {
766                 printf(" + %d%% of %d pkts]\n",
767                        100 - burst_percent[0], pktnb_stats[1]);
768                 return;
769         }
770         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
771         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
772         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
773                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
774                 return;
775         }
776         printf(" + %d%% of %d pkts + %d%% of others]\n",
777                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
778 }
779 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
780
/*
 * Print the aggregated forward statistics of one port.
 * Two layouts are used: a compact one when no RX/TX queue stats
 * mapping is enabled on the port, and a wide column-aligned one
 * otherwise. Optionally followed by the burst-size spread (when
 * compiled with RTE_TEST_PMD_RECORD_BURST_STATS) and by per
 * stats-register queue counters.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	/* Compact layout: no queue stats mapping enabled on this port. */
	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		/* Checksum counters are only meaningful with the csum engine. */
		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	/* Wide layout: queue stats mapping enabled. */
	else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/* Burst-size spread for the first RX/TX stream of this port. */
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	/* Per stats-register counters, one line per register. */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
863
864 static void
865 fwd_stream_stats_display(streamid_t stream_id)
866 {
867         struct fwd_stream *fs;
868         static const char *fwd_top_stats_border = "-------";
869
870         fs = fwd_streams[stream_id];
871         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
872             (fs->fwd_dropped == 0))
873                 return;
874         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
875                "TX Port=%2d/Queue=%2d %s\n",
876                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
877                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
878         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
879                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
880
881         /* if checksum mode */
882         if (cur_fwd_eng == &csum_fwd_engine) {
883                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
884                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
885         }
886
887 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
888         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
889         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
890 #endif
891 }
892
893 static void
894 flush_fwd_rx_queues(void)
895 {
896         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
897         portid_t  rxp;
898         portid_t port_id;
899         queueid_t rxq;
900         uint16_t  nb_rx;
901         uint16_t  i;
902         uint8_t   j;
903         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
904         uint64_t timer_period;
905
906         /* convert to number of cycles */
907         timer_period = rte_get_timer_hz(); /* 1 second timeout */
908
909         for (j = 0; j < 2; j++) {
910                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
911                         for (rxq = 0; rxq < nb_rxq; rxq++) {
912                                 port_id = fwd_ports_ids[rxp];
913                                 /**
914                                 * testpmd can stuck in the below do while loop
915                                 * if rte_eth_rx_burst() always returns nonzero
916                                 * packets. So timer is added to exit this loop
917                                 * after 1sec timer expiry.
918                                 */
919                                 prev_tsc = rte_rdtsc();
920                                 do {
921                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
922                                                 pkts_burst, MAX_PKT_BURST);
923                                         for (i = 0; i < nb_rx; i++)
924                                                 rte_pktmbuf_free(pkts_burst[i]);
925
926                                         cur_tsc = rte_rdtsc();
927                                         diff_tsc = cur_tsc - prev_tsc;
928                                         timer_tsc += diff_tsc;
929                                 } while ((nb_rx > 0) &&
930                                         (timer_tsc < timer_period));
931                                 timer_tsc = 0;
932                         }
933                 }
934                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
935         }
936 }
937
938 static void
939 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
940 {
941         struct fwd_stream **fsm;
942         streamid_t nb_fs;
943         streamid_t sm_id;
944 #ifdef RTE_LIBRTE_BITRATE
945         uint64_t tics_per_1sec;
946         uint64_t tics_datum;
947         uint64_t tics_current;
948         uint8_t idx_port, cnt_ports;
949
950         cnt_ports = rte_eth_dev_count();
951         tics_datum = rte_rdtsc();
952         tics_per_1sec = rte_get_timer_hz();
953 #endif
954         fsm = &fwd_streams[fc->stream_idx];
955         nb_fs = fc->stream_nb;
956         do {
957                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
958                         (*pkt_fwd)(fsm[sm_id]);
959 #ifdef RTE_LIBRTE_BITRATE
960                 tics_current = rte_rdtsc();
961                 if (tics_current - tics_datum >= tics_per_1sec) {
962                         /* Periodic bitrate calculation */
963                         for (idx_port = 0; idx_port < cnt_ports; idx_port++)
964                                 rte_stats_bitrate_calc(bitrate_data, idx_port);
965                         tics_datum = tics_current;
966                 }
967 #endif
968 #ifdef RTE_LIBRTE_LATENCY_STATS
969                 if (latencystats_lcore_id == rte_lcore_id())
970                         rte_latencystats_update();
971 #endif
972
973         } while (! fc->stopped);
974 }
975
976 static int
977 start_pkt_forward_on_core(void *fwd_arg)
978 {
979         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
980                              cur_fwd_config.fwd_eng->packet_fwd);
981         return 0;
982 }
983
984 /*
985  * Run the TXONLY packet forwarding engine to send a single burst of packets.
986  * Used to start communication flows in network loopback test configurations.
987  */
988 static int
989 run_one_txonly_burst_on_core(void *fwd_arg)
990 {
991         struct fwd_lcore *fwd_lc;
992         struct fwd_lcore tmp_lcore;
993
994         fwd_lc = (struct fwd_lcore *) fwd_arg;
995         tmp_lcore = *fwd_lc;
996         tmp_lcore.stopped = 1;
997         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
998         return 0;
999 }
1000
1001 /*
1002  * Launch packet forwarding:
1003  *     - Setup per-port forwarding context.
1004  *     - launch logical cores with their forwarding configuration.
1005  */
1006 static void
1007 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1008 {
1009         port_fwd_begin_t port_fwd_begin;
1010         unsigned int i;
1011         unsigned int lc_id;
1012         int diag;
1013
1014         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1015         if (port_fwd_begin != NULL) {
1016                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1017                         (*port_fwd_begin)(fwd_ports_ids[i]);
1018         }
1019         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1020                 lc_id = fwd_lcores_cpuids[i];
1021                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1022                         fwd_lcores[i]->stopped = 0;
1023                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1024                                                      fwd_lcores[i], lc_id);
1025                         if (diag != 0)
1026                                 printf("launch lcore %u failed - diag=%d\n",
1027                                        lc_id, diag);
1028                 }
1029         }
1030 }
1031
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	/* The selected engine needs queues in the direction(s) it uses. */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done == 0 means a forwarding session is already running. */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	/* DCB mode: every forwarding port must carry the DCB flag and
	 * more than one forwarding core is required. */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if(!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	/* Snapshot current HW stats into port->stats so the session's
	 * report can show deltas only (see stop_packet_forwarding()). */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Reset the software counters of every forwarding stream. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	/*
	 * Optionally run the TXONLY engine for "with_tx_first" single
	 * bursts before starting the configured engine, e.g. to prime
	 * traffic in loopback setups.
	 */
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1141
/*
 * Stop packet forwarding: signal every forwarding lcore to stop, wait
 * for them to finish, run the engine's per-port teardown callback, then
 * aggregate and display per-stream and per-port statistics.
 */
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	/* Ask each forwarding lcore to leave its forwarding loop. */
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	/* Run the engine's per-port teardown callback, if any. */
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	/* Fold per-stream counters into the owning ports' counters. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			/* More streams than ports: show each stream's own
			 * stats and detach stream pointers from ports. */
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		/* Subtract the snapshot taken in start_packet_forwarding()
		 * so only this session's traffic is reported. */
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	/* Display accumulated totals over all forwarding ports. */
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
1277
1278 void
1279 dev_set_link_up(portid_t pid)
1280 {
1281         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1282                 printf("\nSet link up fail.\n");
1283 }
1284
1285 void
1286 dev_set_link_down(portid_t pid)
1287 {
1288         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1289                 printf("\nSet link down fail.\n");
1290 }
1291
1292 static int
1293 all_ports_started(void)
1294 {
1295         portid_t pi;
1296         struct rte_port *port;
1297
1298         RTE_ETH_FOREACH_DEV(pi) {
1299                 port = &ports[pi];
1300                 /* Check if there is a port which is not started */
1301                 if ((port->port_status != RTE_PORT_STARTED) &&
1302                         (port->slave_flag == 0))
1303                         return 0;
1304         }
1305
1306         /* No port is not started */
1307         return 1;
1308 }
1309
1310 int
1311 all_ports_stopped(void)
1312 {
1313         portid_t pi;
1314         struct rte_port *port;
1315
1316         RTE_ETH_FOREACH_DEV(pi) {
1317                 port = &ports[pi];
1318                 if ((port->port_status != RTE_PORT_STOPPED) &&
1319                         (port->slave_flag == 0))
1320                         return 0;
1321         }
1322
1323         return 1;
1324 }
1325
1326 int
1327 port_is_started(portid_t port_id)
1328 {
1329         if (port_id_is_invalid(port_id, ENABLED_WARN))
1330                 return 0;
1331
1332         if (ports[port_id].port_status != RTE_PORT_STARTED)
1333                 return 0;
1334
1335         return 1;
1336 }
1337
1338 static int
1339 port_is_closed(portid_t port_id)
1340 {
1341         if (port_id_is_invalid(port_id, ENABLED_WARN))
1342                 return 0;
1343
1344         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1345                 return 0;
1346
1347         return 1;
1348 }
1349
1350 int
1351 start_port(portid_t pid)
1352 {
1353         int diag, need_check_link_status = -1;
1354         portid_t pi;
1355         queueid_t qi;
1356         struct rte_port *port;
1357         struct ether_addr mac_addr;
1358         enum rte_eth_event_type event_type;
1359
1360         if (port_id_is_invalid(pid, ENABLED_WARN))
1361                 return 0;
1362
1363         if(dcb_config)
1364                 dcb_test = 1;
1365         RTE_ETH_FOREACH_DEV(pi) {
1366                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1367                         continue;
1368
1369                 need_check_link_status = 0;
1370                 port = &ports[pi];
1371                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1372                                                  RTE_PORT_HANDLING) == 0) {
1373                         printf("Port %d is now not stopped\n", pi);
1374                         continue;
1375                 }
1376
1377                 if (port->need_reconfig > 0) {
1378                         port->need_reconfig = 0;
1379
1380                         printf("Configuring Port %d (socket %u)\n", pi,
1381                                         port->socket_id);
1382                         /* configure port */
1383                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1384                                                 &(port->dev_conf));
1385                         if (diag != 0) {
1386                                 if (rte_atomic16_cmpset(&(port->port_status),
1387                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1388                                         printf("Port %d can not be set back "
1389                                                         "to stopped\n", pi);
1390                                 printf("Fail to configure port %d\n", pi);
1391                                 /* try to reconfigure port next time */
1392                                 port->need_reconfig = 1;
1393                                 return -1;
1394                         }
1395                 }
1396                 if (port->need_reconfig_queues > 0) {
1397                         port->need_reconfig_queues = 0;
1398                         /* setup tx queues */
1399                         for (qi = 0; qi < nb_txq; qi++) {
1400                                 if ((numa_support) &&
1401                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1402                                         diag = rte_eth_tx_queue_setup(pi, qi,
1403                                                 nb_txd,txring_numa[pi],
1404                                                 &(port->tx_conf));
1405                                 else
1406                                         diag = rte_eth_tx_queue_setup(pi, qi,
1407                                                 nb_txd,port->socket_id,
1408                                                 &(port->tx_conf));
1409
1410                                 if (diag == 0)
1411                                         continue;
1412
1413                                 /* Fail to setup tx queue, return */
1414                                 if (rte_atomic16_cmpset(&(port->port_status),
1415                                                         RTE_PORT_HANDLING,
1416                                                         RTE_PORT_STOPPED) == 0)
1417                                         printf("Port %d can not be set back "
1418                                                         "to stopped\n", pi);
1419                                 printf("Fail to configure port %d tx queues\n", pi);
1420                                 /* try to reconfigure queues next time */
1421                                 port->need_reconfig_queues = 1;
1422                                 return -1;
1423                         }
1424                         /* setup rx queues */
1425                         for (qi = 0; qi < nb_rxq; qi++) {
1426                                 if ((numa_support) &&
1427                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1428                                         struct rte_mempool * mp =
1429                                                 mbuf_pool_find(rxring_numa[pi]);
1430                                         if (mp == NULL) {
1431                                                 printf("Failed to setup RX queue:"
1432                                                         "No mempool allocation"
1433                                                         " on the socket %d\n",
1434                                                         rxring_numa[pi]);
1435                                                 return -1;
1436                                         }
1437
1438                                         diag = rte_eth_rx_queue_setup(pi, qi,
1439                                              nb_rxd,rxring_numa[pi],
1440                                              &(port->rx_conf),mp);
1441                                 } else {
1442                                         struct rte_mempool *mp =
1443                                                 mbuf_pool_find(port->socket_id);
1444                                         if (mp == NULL) {
1445                                                 printf("Failed to setup RX queue:"
1446                                                         "No mempool allocation"
1447                                                         " on the socket %d\n",
1448                                                         port->socket_id);
1449                                                 return -1;
1450                                         }
1451                                         diag = rte_eth_rx_queue_setup(pi, qi,
1452                                              nb_rxd,port->socket_id,
1453                                              &(port->rx_conf), mp);
1454                                 }
1455                                 if (diag == 0)
1456                                         continue;
1457
1458                                 /* Fail to setup rx queue, return */
1459                                 if (rte_atomic16_cmpset(&(port->port_status),
1460                                                         RTE_PORT_HANDLING,
1461                                                         RTE_PORT_STOPPED) == 0)
1462                                         printf("Port %d can not be set back "
1463                                                         "to stopped\n", pi);
1464                                 printf("Fail to configure port %d rx queues\n", pi);
1465                                 /* try to reconfigure queues next time */
1466                                 port->need_reconfig_queues = 1;
1467                                 return -1;
1468                         }
1469                 }
1470
1471                 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1472                      event_type < RTE_ETH_EVENT_MAX;
1473                      event_type++) {
1474                         diag = rte_eth_dev_callback_register(pi,
1475                                                         event_type,
1476                                                         eth_event_callback,
1477                                                         NULL);
1478                         if (diag) {
1479                                 printf("Failed to setup even callback for event %d\n",
1480                                         event_type);
1481                                 return -1;
1482                         }
1483                 }
1484
1485                 /* start port */
1486                 if (rte_eth_dev_start(pi) < 0) {
1487                         printf("Fail to start port %d\n", pi);
1488
1489                         /* Fail to setup rx queue, return */
1490                         if (rte_atomic16_cmpset(&(port->port_status),
1491                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1492                                 printf("Port %d can not be set back to "
1493                                                         "stopped\n", pi);
1494                         continue;
1495                 }
1496
1497                 if (rte_atomic16_cmpset(&(port->port_status),
1498                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1499                         printf("Port %d can not be set into started\n", pi);
1500
1501                 rte_eth_macaddr_get(pi, &mac_addr);
1502                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1503                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1504                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1505                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1506
1507                 /* at least one port started, need checking link status */
1508                 need_check_link_status = 1;
1509         }
1510
1511         if (need_check_link_status == 1 && !no_link_check)
1512                 check_all_ports_link_status(RTE_PORT_ALL);
1513         else if (need_check_link_status == 0)
1514                 printf("Please stop the ports first\n");
1515
1516         printf("Done\n");
1517         return 0;
1518 }
1519
1520 void
1521 stop_port(portid_t pid)
1522 {
1523         portid_t pi;
1524         struct rte_port *port;
1525         int need_check_link_status = 0;
1526
1527         if (dcb_test) {
1528                 dcb_test = 0;
1529                 dcb_config = 0;
1530         }
1531
1532         if (port_id_is_invalid(pid, ENABLED_WARN))
1533                 return;
1534
1535         printf("Stopping ports...\n");
1536
1537         RTE_ETH_FOREACH_DEV(pi) {
1538                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1539                         continue;
1540
1541                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1542                         printf("Please remove port %d from forwarding configuration.\n", pi);
1543                         continue;
1544                 }
1545
1546                 if (port_is_bonding_slave(pi)) {
1547                         printf("Please remove port %d from bonded device.\n", pi);
1548                         continue;
1549                 }
1550
1551                 port = &ports[pi];
1552                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1553                                                 RTE_PORT_HANDLING) == 0)
1554                         continue;
1555
1556                 rte_eth_dev_stop(pi);
1557
1558                 if (rte_atomic16_cmpset(&(port->port_status),
1559                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1560                         printf("Port %d can not be set into stopped\n", pi);
1561                 need_check_link_status = 1;
1562         }
1563         if (need_check_link_status && !no_link_check)
1564                 check_all_ports_link_status(RTE_PORT_ALL);
1565
1566         printf("Done\n");
1567 }
1568
1569 void
1570 close_port(portid_t pid)
1571 {
1572         portid_t pi;
1573         struct rte_port *port;
1574
1575         if (port_id_is_invalid(pid, ENABLED_WARN))
1576                 return;
1577
1578         printf("Closing ports...\n");
1579
1580         RTE_ETH_FOREACH_DEV(pi) {
1581                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1582                         continue;
1583
1584                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1585                         printf("Please remove port %d from forwarding configuration.\n", pi);
1586                         continue;
1587                 }
1588
1589                 if (port_is_bonding_slave(pi)) {
1590                         printf("Please remove port %d from bonded device.\n", pi);
1591                         continue;
1592                 }
1593
1594                 port = &ports[pi];
1595                 if (rte_atomic16_cmpset(&(port->port_status),
1596                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1597                         printf("Port %d is already closed\n", pi);
1598                         continue;
1599                 }
1600
1601                 if (rte_atomic16_cmpset(&(port->port_status),
1602                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1603                         printf("Port %d is now not stopped\n", pi);
1604                         continue;
1605                 }
1606
1607                 if (port->flow_list)
1608                         port_flow_flush(pi);
1609                 rte_eth_dev_close(pi);
1610
1611                 if (rte_atomic16_cmpset(&(port->port_status),
1612                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1613                         printf("Port %d cannot be set to closed\n", pi);
1614         }
1615
1616         printf("Done\n");
1617 }
1618
1619 void
1620 attach_port(char *identifier)
1621 {
1622         portid_t pi = 0;
1623         unsigned int socket_id;
1624
1625         printf("Attaching a new port...\n");
1626
1627         if (identifier == NULL) {
1628                 printf("Invalid parameters are specified\n");
1629                 return;
1630         }
1631
1632         if (rte_eth_dev_attach(identifier, &pi))
1633                 return;
1634
1635         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1636         /* if socket_id is invalid, set to 0 */
1637         if (check_socket_id(socket_id) < 0)
1638                 socket_id = 0;
1639         reconfig(pi, socket_id);
1640         rte_eth_promiscuous_enable(pi);
1641
1642         nb_ports = rte_eth_dev_count();
1643
1644         ports[pi].port_status = RTE_PORT_STOPPED;
1645
1646         printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1647         printf("Done\n");
1648 }
1649
1650 void
1651 detach_port(uint8_t port_id)
1652 {
1653         char name[RTE_ETH_NAME_MAX_LEN];
1654
1655         printf("Detaching a port...\n");
1656
1657         if (!port_is_closed(port_id)) {
1658                 printf("Please close port first\n");
1659                 return;
1660         }
1661
1662         if (ports[port_id].flow_list)
1663                 port_flow_flush(port_id);
1664
1665         if (rte_eth_dev_detach(port_id, name))
1666                 return;
1667
1668         nb_ports = rte_eth_dev_count();
1669
1670         printf("Port '%s' is detached. Now total ports is %d\n",
1671                         name, nb_ports);
1672         printf("Done\n");
1673         return;
1674 }
1675
1676 void
1677 pmd_test_exit(void)
1678 {
1679         portid_t pt_id;
1680
1681         if (test_done == 0)
1682                 stop_packet_forwarding();
1683
1684         if (ports != NULL) {
1685                 no_link_check = 1;
1686                 RTE_ETH_FOREACH_DEV(pt_id) {
1687                         printf("\nShutting down port %d...\n", pt_id);
1688                         fflush(stdout);
1689                         stop_port(pt_id);
1690                         close_port(pt_id);
1691                 }
1692         }
1693         printf("\nBye...\n");
1694 }
1695
/* Handler type for a test-menu command; takes no arguments. */
typedef void (*cmd_func_t)(void);
/* Binds a command name to the handler that implements it. */
struct pmd_test_command {
	const char *cmd_name;	/* command string entered by the user */
	cmd_func_t cmd_func;	/* handler invoked for this command */
};

/* Entry count of the pmd_test_menu table (defined elsewhere in this file). */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1703
1704 /* Check the link status of all ports in up to 9s, and print them finally */
1705 static void
1706 check_all_ports_link_status(uint32_t port_mask)
1707 {
1708 #define CHECK_INTERVAL 100 /* 100ms */
1709 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1710         uint8_t portid, count, all_ports_up, print_flag = 0;
1711         struct rte_eth_link link;
1712
1713         printf("Checking link statuses...\n");
1714         fflush(stdout);
1715         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1716                 all_ports_up = 1;
1717                 RTE_ETH_FOREACH_DEV(portid) {
1718                         if ((port_mask & (1 << portid)) == 0)
1719                                 continue;
1720                         memset(&link, 0, sizeof(link));
1721                         rte_eth_link_get_nowait(portid, &link);
1722                         /* print link status if flag set */
1723                         if (print_flag == 1) {
1724                                 if (link.link_status)
1725                                         printf("Port %d Link Up - speed %u "
1726                                                 "Mbps - %s\n", (uint8_t)portid,
1727                                                 (unsigned)link.link_speed,
1728                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1729                                         ("full-duplex") : ("half-duplex\n"));
1730                                 else
1731                                         printf("Port %d Link Down\n",
1732                                                 (uint8_t)portid);
1733                                 continue;
1734                         }
1735                         /* clear all_ports_up flag if any link down */
1736                         if (link.link_status == ETH_LINK_DOWN) {
1737                                 all_ports_up = 0;
1738                                 break;
1739                         }
1740                 }
1741                 /* after finally printing all link status, get out */
1742                 if (print_flag == 1)
1743                         break;
1744
1745                 if (all_ports_up == 0) {
1746                         fflush(stdout);
1747                         rte_delay_ms(CHECK_INTERVAL);
1748                 }
1749
1750                 /* set the print_flag if all ports up or timeout */
1751                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1752                         print_flag = 1;
1753                 }
1754
1755                 if (lsc_interrupt)
1756                         break;
1757         }
1758 }
1759
1760 /* This function is used by the interrupt thread */
1761 static void
1762 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
1763 {
1764         static const char * const event_desc[] = {
1765                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1766                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1767                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1768                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1769                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1770                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1771                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1772                 [RTE_ETH_EVENT_MAX] = NULL,
1773         };
1774
1775         RTE_SET_USED(param);
1776
1777         if (type >= RTE_ETH_EVENT_MAX) {
1778                 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1779                         port_id, __func__, type);
1780                 fflush(stderr);
1781         } else {
1782                 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1783                         event_desc[type]);
1784                 fflush(stdout);
1785         }
1786 }
1787
1788 static int
1789 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1790 {
1791         uint16_t i;
1792         int diag;
1793         uint8_t mapping_found = 0;
1794
1795         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1796                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1797                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1798                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1799                                         tx_queue_stats_mappings[i].queue_id,
1800                                         tx_queue_stats_mappings[i].stats_counter_id);
1801                         if (diag != 0)
1802                                 return diag;
1803                         mapping_found = 1;
1804                 }
1805         }
1806         if (mapping_found)
1807                 port->tx_queue_stats_mapping_enabled = 1;
1808         return 0;
1809 }
1810
1811 static int
1812 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1813 {
1814         uint16_t i;
1815         int diag;
1816         uint8_t mapping_found = 0;
1817
1818         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1819                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1820                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1821                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1822                                         rx_queue_stats_mappings[i].queue_id,
1823                                         rx_queue_stats_mappings[i].stats_counter_id);
1824                         if (diag != 0)
1825                                 return diag;
1826                         mapping_found = 1;
1827                 }
1828         }
1829         if (mapping_found)
1830                 port->rx_queue_stats_mapping_enabled = 1;
1831         return 0;
1832 }
1833
1834 static void
1835 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1836 {
1837         int diag = 0;
1838
1839         diag = set_tx_queue_stats_mapping_registers(pi, port);
1840         if (diag != 0) {
1841                 if (diag == -ENOTSUP) {
1842                         port->tx_queue_stats_mapping_enabled = 0;
1843                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1844                 }
1845                 else
1846                         rte_exit(EXIT_FAILURE,
1847                                         "set_tx_queue_stats_mapping_registers "
1848                                         "failed for port id=%d diag=%d\n",
1849                                         pi, diag);
1850         }
1851
1852         diag = set_rx_queue_stats_mapping_registers(pi, port);
1853         if (diag != 0) {
1854                 if (diag == -ENOTSUP) {
1855                         port->rx_queue_stats_mapping_enabled = 0;
1856                         printf("RX queue stats mapping not supported port id=%d\n", pi);
1857                 }
1858                 else
1859                         rte_exit(EXIT_FAILURE,
1860                                         "set_rx_queue_stats_mapping_registers "
1861                                         "failed for port id=%d diag=%d\n",
1862                                         pi, diag);
1863         }
1864 }
1865
1866 static void
1867 rxtx_port_config(struct rte_port *port)
1868 {
1869         port->rx_conf = port->dev_info.default_rxconf;
1870         port->tx_conf = port->dev_info.default_txconf;
1871
1872         /* Check if any RX/TX parameters have been passed */
1873         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1874                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1875
1876         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1877                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1878
1879         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1880                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1881
1882         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1883                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1884
1885         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1886                 port->rx_conf.rx_drop_en = rx_drop_en;
1887
1888         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1889                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1890
1891         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1892                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1893
1894         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1895                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1896
1897         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1898                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1899
1900         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1901                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1902
1903         if (txq_flags != RTE_PMD_PARAM_UNSET)
1904                 port->tx_conf.txq_flags = txq_flags;
1905 }
1906
1907 void
1908 init_port_config(void)
1909 {
1910         portid_t pid;
1911         struct rte_port *port;
1912
1913         RTE_ETH_FOREACH_DEV(pid) {
1914                 port = &ports[pid];
1915                 port->dev_conf.rxmode = rx_mode;
1916                 port->dev_conf.fdir_conf = fdir_conf;
1917                 if (nb_rxq > 1) {
1918                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1919                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1920                 } else {
1921                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1922                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1923                 }
1924
1925                 if (port->dcb_flag == 0) {
1926                         if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1927                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1928                         else
1929                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1930                 }
1931
1932                 rxtx_port_config(port);
1933
1934                 rte_eth_macaddr_get(pid, &port->eth_addr);
1935
1936                 map_port_queue_stats_mapping_registers(pid, port);
1937 #ifdef RTE_NIC_BYPASS
1938                 rte_eth_dev_bypass_init(pid);
1939 #endif
1940
1941                 if (lsc_interrupt &&
1942                     (rte_eth_devices[pid].data->dev_flags &
1943                      RTE_ETH_DEV_INTR_LSC))
1944                         port->dev_conf.intr_conf.lsc = 1;
1945         }
1946 }
1947
1948 void set_port_slave_flag(portid_t slave_pid)
1949 {
1950         struct rte_port *port;
1951
1952         port = &ports[slave_pid];
1953         port->slave_flag = 1;
1954 }
1955
1956 void clear_port_slave_flag(portid_t slave_pid)
1957 {
1958         struct rte_port *port;
1959
1960         port = &ports[slave_pid];
1961         port->slave_flag = 0;
1962 }
1963
1964 uint8_t port_is_bonding_slave(portid_t slave_pid)
1965 {
1966         struct rte_port *port;
1967
1968         port = &ports[slave_pid];
1969         return port->slave_flag;
1970 }
1971
/* VLAN IDs used to build the VMDQ+DCB pool map and the DCB VLAN filter. */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
1978
/*
 * Fill *eth_conf with a DCB configuration: VMDQ+DCB when dcb_mode is
 * DCB_VT_ENABLED, plain DCB (with RSS on RX) otherwise.  num_tcs selects
 * the traffic-class count and pfc_en toggles priority flow control.
 * Always returns 0.
 */
static  int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		/* 4 TCs leave room for 32 pools, 8 TCs only for 16. */
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		/* Spread the vlan_tags table round-robin over the pools. */
		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		/* Identity mapping: user priority i -> traffic class i. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		/* Wrap user priorities onto the available traffic classes. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
		/* Non-VT mode combines DCB with RSS on the RX side. */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
2045
/*
 * Put port 'pid' into DCB mode: build the DCB rte_eth_conf, apply it with
 * a zero-queue configure to pick up device capabilities, derive nb_rxq /
 * nb_txq from those capabilities, then set up the VLAN filter table.
 * Returns 0 on success, negative on failure.
 */
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.hw_vlan_filter = 1;

	/**
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be setup.
	 */
	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and has the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			/* With VFs active only the configured queues remain. */
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/*if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			/* One queue per traffic class otherwise. */
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;

		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
2127
2128 static void
2129 init_port(void)
2130 {
2131         /* Configuration of Ethernet ports. */
2132         ports = rte_zmalloc("testpmd: ports",
2133                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2134                             RTE_CACHE_LINE_SIZE);
2135         if (ports == NULL) {
2136                 rte_exit(EXIT_FAILURE,
2137                                 "rte_zmalloc(%d struct rte_port) failed\n",
2138                                 RTE_MAX_ETHPORTS);
2139         }
2140 }
2141
/* Tear down all ports, then terminate the interactive prompt. */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
2148
/*
 * SIGINT/SIGTERM handler: uninitialize optional subsystems, shut down the
 * ports, then re-raise the signal with the default disposition so the
 * process reports the correct termination status to its parent.
 */
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		force_quit();
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
2168
/*
 * testpmd entry point: initialize EAL, allocate port state, parse the
 * application arguments left over after EAL init, configure and start all
 * ports, then either run the interactive prompt or forward until stdin
 * delivers a byte.
 */
int
main(int argc, char** argv)
{
	int  diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* EAL consumed 'diag' arguments; the rest belong to testpmd. */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_data = rte_stats_bitrate_create();
	if (bitrate_data == NULL)
		rte_exit(EXIT_FAILURE, "Could not allocate bitrate data.\n");
	rte_stats_bitrate_reg(bitrate_data);
#endif


#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		/* Non-interactive mode: forward until stdin is readable. */
		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}