6526018517234421516027a120204d8fda6ff959
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
78 #endif
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
81 #endif
82 #include <rte_flow.h>
83 #include <rte_metrics.h>
84 #ifdef RTE_LIBRTE_BITRATE
85 #include <rte_bitrate.h>
86 #endif
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
89 #endif
90
91 #include "testpmd.h"
92
93 uint16_t verbose_level = 0; /**< Silent by default. */
94
95 /* use master core for command line ? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
98
99 /*
100  * NUMA support configuration.
101  * When set, the NUMA support attempts to dispatch the allocation of the
102  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
103  * probed ports among the CPU sockets 0 and 1.
104  * Otherwise, all memory is allocated from CPU socket 0.
105  */
106 uint8_t numa_support = 1; /**< numa enabled by default */
107
108 /*
109  * In UMA mode,all memory is allocated from socket 0 if --socket-num is
110  * not configured.
111  */
112 uint8_t socket_num = UMA_NO_CONFIG;
113
114 /*
115  * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
116  */
117 uint8_t mp_anon = 0;
118
119 /*
120  * Record the Ethernet address of peer target ports to which packets are
121  * forwarded.
122  * Must be instantiated with the ethernet addresses of peer traffic generator
123  * ports.
124  */
125 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
126 portid_t nb_peer_eth_addrs = 0;
127
128 /*
129  * Probed Target Environment.
130  */
131 struct rte_port *ports;        /**< For all probed ethernet ports. */
132 portid_t nb_ports;             /**< Number of probed ethernet ports. */
133 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
134 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
135
136 /*
137  * Test Forwarding Configuration.
138  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
139  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
140  */
141 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
142 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
143 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
144 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
145
146 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
147 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
148
149 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
150 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
151
152 /*
153  * Forwarding engines.
154  */
155 struct fwd_engine * fwd_engines[] = {
156         &io_fwd_engine,
157         &mac_fwd_engine,
158         &mac_swap_engine,
159         &flow_gen_engine,
160         &rx_only_engine,
161         &tx_only_engine,
162         &csum_fwd_engine,
163         &icmp_echo_engine,
164 #ifdef RTE_LIBRTE_IEEE1588
165         &ieee1588_fwd_engine,
166 #endif
167         NULL,
168 };
169
170 struct fwd_config cur_fwd_config;
171 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
172 uint32_t retry_enabled;
173 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
174 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
175
176 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
177 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
178                                       * specified on command-line. */
179
180 /*
181  * Configuration of packet segments used by the "txonly" processing engine.
182  */
183 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
184 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
185         TXONLY_DEF_PACKET_LEN,
186 };
187 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
188
189 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
190 /**< Split policy for packets to TX. */
191
192 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
193 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
194
195 /* current configuration is in DCB or not,0 means it is not in DCB mode */
196 uint8_t dcb_config = 0;
197
198 /* Whether the dcb is in testing status */
199 uint8_t dcb_test = 0;
200
201 /*
202  * Configurable number of RX/TX queues.
203  */
204 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
205 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
206
207 /*
208  * Configurable number of RX/TX ring descriptors.
209  */
210 #define RTE_TEST_RX_DESC_DEFAULT 128
211 #define RTE_TEST_TX_DESC_DEFAULT 512
212 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
213 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
214
215 #define RTE_PMD_PARAM_UNSET -1
216 /*
217  * Configurable values of RX and TX ring threshold registers.
218  */
219
220 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
221 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
222 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
223
224 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
225 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
226 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
227
228 /*
229  * Configurable value of RX free threshold.
230  */
231 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
232
233 /*
234  * Configurable value of RX drop enable.
235  */
236 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
237
238 /*
239  * Configurable value of TX free threshold.
240  */
241 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
242
243 /*
244  * Configurable value of TX RS bit threshold.
245  */
246 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
247
248 /*
249  * Configurable value of TX queue flags.
250  */
251 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
252
253 /*
254  * Receive Side Scaling (RSS) configuration.
255  */
256 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
257
258 /*
259  * Port topology configuration
260  */
261 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
262
263 /*
264  * Avoids to flush all the RX streams before starts forwarding.
265  */
266 uint8_t no_flush_rx = 0; /* flush by default */
267
268 /*
269  * Avoids to check link status when starting/stopping a port.
270  */
271 uint8_t no_link_check = 0; /* check by default */
272
273 /*
274  * Enable link status change notification
275  */
276 uint8_t lsc_interrupt = 1; /* enabled by default */
277
278 /*
279  * Enable device removal notification.
280  */
281 uint8_t rmv_interrupt = 1; /* enabled by default */
282
283 /*
284  * NIC bypass mode configuration options.
285  */
286 #ifdef RTE_NIC_BYPASS
287
288 /* The NIC bypass watchdog timeout. */
289 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
290
291 #endif
292
293 #ifdef RTE_LIBRTE_LATENCY_STATS
294
295 /*
296  * Set when latency stats is enabled in the commandline
297  */
298 uint8_t latencystats_enabled;
299
300 /*
301  * Lcore ID to serive latency statistics.
302  */
303 lcoreid_t latencystats_lcore_id = -1;
304
305 #endif
306
307 /*
308  * Ethernet device configuration.
309  */
310 struct rte_eth_rxmode rx_mode = {
311         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
312         .split_hdr_size = 0,
313         .header_split   = 0, /**< Header Split disabled. */
314         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
315         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
316         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
317         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
318         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
319         .hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
320 };
321
322 struct rte_fdir_conf fdir_conf = {
323         .mode = RTE_FDIR_MODE_NONE,
324         .pballoc = RTE_FDIR_PBALLOC_64K,
325         .status = RTE_FDIR_REPORT_STATUS,
326         .mask = {
327                 .vlan_tci_mask = 0x0,
328                 .ipv4_mask     = {
329                         .src_ip = 0xFFFFFFFF,
330                         .dst_ip = 0xFFFFFFFF,
331                 },
332                 .ipv6_mask     = {
333                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
334                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
335                 },
336                 .src_port_mask = 0xFFFF,
337                 .dst_port_mask = 0xFFFF,
338                 .mac_addr_byte_mask = 0xFF,
339                 .tunnel_type_mask = 1,
340                 .tunnel_id_mask = 0xFFFFFFFF,
341         },
342         .drop_queue = 127,
343 };
344
345 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
346
347 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
348 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
349
350 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
351 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
352
353 uint16_t nb_tx_queue_stats_mappings = 0;
354 uint16_t nb_rx_queue_stats_mappings = 0;
355
356 unsigned max_socket = 0;
357
358 #ifdef RTE_LIBRTE_BITRATE
359 /* Bitrate statistics */
360 struct rte_stats_bitrates *bitrate_data;
361 lcoreid_t bitrate_lcore_id;
362 uint8_t bitrate_enabled;
363 #endif
364
365 /* Forward function declarations */
366 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
367 static void check_all_ports_link_status(uint32_t port_mask);
368 static void eth_event_callback(uint8_t port_id,
369                                enum rte_eth_event_type type,
370                                void *param);
371
372 /*
373  * Check if all the ports are started.
374  * If yes, return positive value. If not, return zero.
375  */
376 static int all_ports_started(void);
377
378 /*
379  * Setup default configuration.
380  */
381 static void
382 set_default_fwd_lcores_config(void)
383 {
384         unsigned int i;
385         unsigned int nb_lc;
386         unsigned int sock_num;
387
388         nb_lc = 0;
389         for (i = 0; i < RTE_MAX_LCORE; i++) {
390                 sock_num = rte_lcore_to_socket_id(i) + 1;
391                 if (sock_num > max_socket) {
392                         if (sock_num > RTE_MAX_NUMA_NODES)
393                                 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
394                         max_socket = sock_num;
395                 }
396                 if (!rte_lcore_is_enabled(i))
397                         continue;
398                 if (i == rte_get_master_lcore())
399                         continue;
400                 fwd_lcores_cpuids[nb_lc++] = i;
401         }
402         nb_lcores = (lcoreid_t) nb_lc;
403         nb_cfg_lcores = nb_lcores;
404         nb_fwd_lcores = 1;
405 }
406
407 static void
408 set_def_peer_eth_addrs(void)
409 {
410         portid_t i;
411
412         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
413                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
414                 peer_eth_addrs[i].addr_bytes[5] = i;
415         }
416 }
417
418 static void
419 set_default_fwd_ports_config(void)
420 {
421         portid_t pt_id;
422
423         for (pt_id = 0; pt_id < nb_ports; pt_id++)
424                 fwd_ports_ids[pt_id] = pt_id;
425
426         nb_cfg_ports = nb_ports;
427         nb_fwd_ports = nb_ports;
428 }
429
/*
 * Reset the forwarding configuration (lcores, peer Ethernet addresses
 * and port list) to its built-in defaults.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
437
/*
 * Configuration initialisation done once at init time.
 *
 * Create the mbuf pool used by forwarding streams on @socket_id.
 *
 * @param mbuf_seg_size  per-mbuf data buffer size
 * @param nb_mbuf        number of mbufs to allocate in the pool
 * @param socket_id      NUMA socket the pool memory is taken from
 *
 * Terminates the application via rte_exit() if no pool can be created.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	/* Per-object size: mbuf header plus the data buffer. */
	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

#ifdef RTE_LIBRTE_PMD_XENVIRT
	/* Under Xen, first try a grant-table backed pool. */
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* if the former XEN allocation failed fall back to normal allocation */
	if (rte_mp == NULL) {
		if (mp_anon != 0) {
			/* Populate the pool from anonymous mappings (not
			 * necessarily physically contiguous). */
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);
			if (rte_mp == NULL)
				goto err;

			/* rte_mempool_populate_anon() returns the number of
			 * objects added; 0 signals failure. */
			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		} else {
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		}
	}

err:
	/* Success also reaches this label; only NULL means failure here. */
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
498
499 /*
500  * Check given socket id is valid or not with NUMA mode,
501  * if valid, return 0, else return -1
502  */
503 static int
504 check_socket_id(const unsigned int socket_id)
505 {
506         static int warning_once = 0;
507
508         if (socket_id >= max_socket) {
509                 if (!warning_once && numa_support)
510                         printf("Warning: NUMA should be configured manually by"
511                                " using --port-numa-config and"
512                                " --ring-numa-config parameters along with"
513                                " --numa.\n");
514                 warning_once = 1;
515                 return -1;
516         }
517         return 0;
518 }
519
520 static void
521 init_config(void)
522 {
523         portid_t pid;
524         struct rte_port *port;
525         struct rte_mempool *mbp;
526         unsigned int nb_mbuf_per_pool;
527         lcoreid_t  lc_id;
528         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
529
530         memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
531         /* Configuration of logical cores. */
532         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
533                                 sizeof(struct fwd_lcore *) * nb_lcores,
534                                 RTE_CACHE_LINE_SIZE);
535         if (fwd_lcores == NULL) {
536                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
537                                                         "failed\n", nb_lcores);
538         }
539         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
540                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
541                                                sizeof(struct fwd_lcore),
542                                                RTE_CACHE_LINE_SIZE);
543                 if (fwd_lcores[lc_id] == NULL) {
544                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
545                                                                 "failed\n");
546                 }
547                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
548         }
549
550         RTE_ETH_FOREACH_DEV(pid) {
551                 port = &ports[pid];
552                 rte_eth_dev_info_get(pid, &port->dev_info);
553
554                 if (numa_support) {
555                         if (port_numa[pid] != NUMA_NO_CONFIG)
556                                 port_per_socket[port_numa[pid]]++;
557                         else {
558                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
559
560                                 /* if socket_id is invalid, set to 0 */
561                                 if (check_socket_id(socket_id) < 0)
562                                         socket_id = 0;
563                                 port_per_socket[socket_id]++;
564                         }
565                 }
566
567                 /* set flag to initialize port/queue */
568                 port->need_reconfig = 1;
569                 port->need_reconfig_queues = 1;
570         }
571
572         /*
573          * Create pools of mbuf.
574          * If NUMA support is disabled, create a single pool of mbuf in
575          * socket 0 memory by default.
576          * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
577          *
578          * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
579          * nb_txd can be configured at run time.
580          */
581         if (param_total_num_mbufs)
582                 nb_mbuf_per_pool = param_total_num_mbufs;
583         else {
584                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
585                         (nb_lcores * mb_mempool_cache) +
586                         RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
587                 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
588         }
589
590         if (numa_support) {
591                 uint8_t i;
592
593                 for (i = 0; i < max_socket; i++)
594                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, i);
595         } else {
596                 if (socket_num == UMA_NO_CONFIG)
597                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
598                 else
599                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
600                                                  socket_num);
601         }
602
603         init_port_config();
604
605         /*
606          * Records which Mbuf pool to use by each logical core, if needed.
607          */
608         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
609                 mbp = mbuf_pool_find(
610                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
611
612                 if (mbp == NULL)
613                         mbp = mbuf_pool_find(0);
614                 fwd_lcores[lc_id]->mbp = mbp;
615         }
616
617         /* Configuration of packet forwarding streams. */
618         if (init_fwd_streams() < 0)
619                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
620
621         fwd_config_setup();
622 }
623
624
625 void
626 reconfig(portid_t new_port_id, unsigned socket_id)
627 {
628         struct rte_port *port;
629
630         /* Reconfiguration of Ethernet ports. */
631         port = &ports[new_port_id];
632         rte_eth_dev_info_get(new_port_id, &port->dev_info);
633
634         /* set flag to initialize port/queue */
635         port->need_reconfig = 1;
636         port->need_reconfig_queues = 1;
637         port->socket_id = socket_id;
638
639         init_port_config();
640 }
641
642
/*
 * (Re)allocate the array of forwarding streams.
 *
 * Validates nb_rxq/nb_txq against every port's limits, assigns each
 * port a socket id (from --port-numa-config, the device's own socket,
 * or --socket-num / 0 in UMA mode), then sizes the stream array as
 * nb_ports * max(nb_rxq, nb_txq), freeing and re-allocating it only
 * when that count changed.
 *
 * @return 0 on success, -1 when a queue count exceeds a port's limit
 *         or the queue count is zero.  Exits the application on
 *         allocation failure.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	/* Stream count unchanged: keep the existing array untouched. */
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
723
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Display the recorded burst-size distribution for one direction
 * (@rx_tx is "RX" or "TX"): the total number of bursts plus the share
 * of the two most frequent burst sizes.
 */
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			/* New most-frequent burst size; the previous
			 * maximum becomes the runner-up. */
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			/* Fix: also track the second-highest count when it
			 * does not displace the maximum — the original only
			 * updated the runner-up on a new maximum, so the
			 * reported second place could be wrong. */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	/* Percentages that round down to 0 are folded into "others". */
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
778
/*
 * Print the accumulated forwarding statistics of one port.
 *
 * Two layouts are used: a compact one when no RX/TX queue-stats
 * mapping is enabled on the port, and a wide column-aligned one
 * otherwise.  Per-queue stats-register counters are appended when the
 * corresponding mapping is enabled.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		/* Compact layout: no queue-stats mapping on this port. */
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		/* Checksum-engine-specific counters. */
		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		/* Wide layout used when queue-stats mapping is enabled. */
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/* Burst-size histograms recorded by the forwarding streams. */
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	/* Per-queue stats registers, when mapped. */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
861
862 static void
863 fwd_stream_stats_display(streamid_t stream_id)
864 {
865         struct fwd_stream *fs;
866         static const char *fwd_top_stats_border = "-------";
867
868         fs = fwd_streams[stream_id];
869         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
870             (fs->fwd_dropped == 0))
871                 return;
872         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
873                "TX Port=%2d/Queue=%2d %s\n",
874                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
875                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
876         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
877                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
878
879         /* if checksum mode */
880         if (cur_fwd_eng == &csum_fwd_engine) {
881                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
882                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
883         }
884
885 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
886         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
887         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
888 #endif
889 }
890
891 static void
892 flush_fwd_rx_queues(void)
893 {
894         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
895         portid_t  rxp;
896         portid_t port_id;
897         queueid_t rxq;
898         uint16_t  nb_rx;
899         uint16_t  i;
900         uint8_t   j;
901         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
902         uint64_t timer_period;
903
904         /* convert to number of cycles */
905         timer_period = rte_get_timer_hz(); /* 1 second timeout */
906
907         for (j = 0; j < 2; j++) {
908                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
909                         for (rxq = 0; rxq < nb_rxq; rxq++) {
910                                 port_id = fwd_ports_ids[rxp];
911                                 /**
912                                 * testpmd can stuck in the below do while loop
913                                 * if rte_eth_rx_burst() always returns nonzero
914                                 * packets. So timer is added to exit this loop
915                                 * after 1sec timer expiry.
916                                 */
917                                 prev_tsc = rte_rdtsc();
918                                 do {
919                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
920                                                 pkts_burst, MAX_PKT_BURST);
921                                         for (i = 0; i < nb_rx; i++)
922                                                 rte_pktmbuf_free(pkts_burst[i]);
923
924                                         cur_tsc = rte_rdtsc();
925                                         diff_tsc = cur_tsc - prev_tsc;
926                                         timer_tsc += diff_tsc;
927                                 } while ((nb_rx > 0) &&
928                                         (timer_tsc < timer_period));
929                                 timer_tsc = 0;
930                         }
931                 }
932                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
933         }
934 }
935
/*
 * Forwarding loop executed on a forwarding lcore: repeatedly invoke the
 * engine's per-stream callback (pkt_fwd) on every stream assigned to
 * this lcore until fc->stopped becomes non-zero (set by
 * stop_packet_forwarding(), or preset by the caller for a single-pass
 * run as in run_one_txonly_burst_on_core()).
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* Streams handled by this lcore are contiguous in fwd_streams[]. */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		/*
		 * Bitrate statistics are recomputed roughly once per second,
		 * and only on the lcore designated for that purpose.
		 */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
						idx_port < cnt_ports;
						idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		/* Latency stats are likewise updated on one dedicated lcore. */
		if (latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
979
980 static int
981 start_pkt_forward_on_core(void *fwd_arg)
982 {
983         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
984                              cur_fwd_config.fwd_eng->packet_fwd);
985         return 0;
986 }
987
988 /*
989  * Run the TXONLY packet forwarding engine to send a single burst of packets.
990  * Used to start communication flows in network loopback test configurations.
991  */
992 static int
993 run_one_txonly_burst_on_core(void *fwd_arg)
994 {
995         struct fwd_lcore *fwd_lc;
996         struct fwd_lcore tmp_lcore;
997
998         fwd_lc = (struct fwd_lcore *) fwd_arg;
999         tmp_lcore = *fwd_lc;
1000         tmp_lcore.stopped = 1;
1001         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1002         return 0;
1003 }
1004
1005 /*
1006  * Launch packet forwarding:
1007  *     - Setup per-port forwarding context.
1008  *     - launch logical cores with their forwarding configuration.
1009  */
1010 static void
1011 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1012 {
1013         port_fwd_begin_t port_fwd_begin;
1014         unsigned int i;
1015         unsigned int lc_id;
1016         int diag;
1017
1018         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1019         if (port_fwd_begin != NULL) {
1020                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1021                         (*port_fwd_begin)(fwd_ports_ids[i]);
1022         }
1023         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1024                 lc_id = fwd_lcores_cpuids[i];
1025                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1026                         fwd_lcores[i]->stopped = 0;
1027                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1028                                                      fwd_lcores[i], lc_id);
1029                         if (diag != 0)
1030                                 printf("launch lcore %u failed - diag=%d\n",
1031                                        lc_id, diag);
1032                 }
1033         }
1034 }
1035
1036 /*
1037  * Launch packet forwarding configuration.
1038  */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	/* Sanity-check that the selected engine has the queues it needs. */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done == 0 means a forwarding run is already in progress. */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	/* DCB mode requires every forwarding port to be DCB-configured. */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if(!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	/*
	 * Snapshot current HW stats into port->stats so that
	 * stop_packet_forwarding() can report per-run deltas.
	 */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Reset the software per-stream counters for the new run. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	/*
	 * Optionally send with_tx_first single TX-only bursts first, to
	 * seed traffic in loopback test setups, before starting the real
	 * forwarding engine.
	 */
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1145
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	/* test_done != 0 means no forwarding run is in progress. */
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	/* Signal every forwarding lcore to leave its loop, then wait. */
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	/* Give the engine a chance to clean up each forwarding port. */
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	/*
	 * Fold each stream's software counters into its ports.  With more
	 * streams than ports the per-stream stats are printed here and the
	 * port's stream pointers cleared; otherwise the (single) stream is
	 * attached to its ports so fwd_port_stats_display() can show its
	 * burst stats.
	 */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	/*
	 * Convert each port's HW stats into per-run deltas by subtracting
	 * the snapshot taken in start_packet_forwarding(), and accumulate
	 * the totals across all forwarding ports.
	 */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	/* Mark the run finished so a new one may be started. */
	test_done = 1;
}
1281
1282 void
1283 dev_set_link_up(portid_t pid)
1284 {
1285         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1286                 printf("\nSet link up fail.\n");
1287 }
1288
1289 void
1290 dev_set_link_down(portid_t pid)
1291 {
1292         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1293                 printf("\nSet link down fail.\n");
1294 }
1295
1296 static int
1297 all_ports_started(void)
1298 {
1299         portid_t pi;
1300         struct rte_port *port;
1301
1302         RTE_ETH_FOREACH_DEV(pi) {
1303                 port = &ports[pi];
1304                 /* Check if there is a port which is not started */
1305                 if ((port->port_status != RTE_PORT_STARTED) &&
1306                         (port->slave_flag == 0))
1307                         return 0;
1308         }
1309
1310         /* No port is not started */
1311         return 1;
1312 }
1313
1314 int
1315 all_ports_stopped(void)
1316 {
1317         portid_t pi;
1318         struct rte_port *port;
1319
1320         RTE_ETH_FOREACH_DEV(pi) {
1321                 port = &ports[pi];
1322                 if ((port->port_status != RTE_PORT_STOPPED) &&
1323                         (port->slave_flag == 0))
1324                         return 0;
1325         }
1326
1327         return 1;
1328 }
1329
1330 int
1331 port_is_started(portid_t port_id)
1332 {
1333         if (port_id_is_invalid(port_id, ENABLED_WARN))
1334                 return 0;
1335
1336         if (ports[port_id].port_status != RTE_PORT_STARTED)
1337                 return 0;
1338
1339         return 1;
1340 }
1341
1342 static int
1343 port_is_closed(portid_t port_id)
1344 {
1345         if (port_id_is_invalid(port_id, ENABLED_WARN))
1346                 return 0;
1347
1348         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1349                 return 0;
1350
1351         return 1;
1352 }
1353
1354 int
1355 start_port(portid_t pid)
1356 {
1357         int diag, need_check_link_status = -1;
1358         portid_t pi;
1359         queueid_t qi;
1360         struct rte_port *port;
1361         struct ether_addr mac_addr;
1362         enum rte_eth_event_type event_type;
1363
1364         if (port_id_is_invalid(pid, ENABLED_WARN))
1365                 return 0;
1366
1367         if(dcb_config)
1368                 dcb_test = 1;
1369         RTE_ETH_FOREACH_DEV(pi) {
1370                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1371                         continue;
1372
1373                 need_check_link_status = 0;
1374                 port = &ports[pi];
1375                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1376                                                  RTE_PORT_HANDLING) == 0) {
1377                         printf("Port %d is now not stopped\n", pi);
1378                         continue;
1379                 }
1380
1381                 if (port->need_reconfig > 0) {
1382                         port->need_reconfig = 0;
1383
1384                         printf("Configuring Port %d (socket %u)\n", pi,
1385                                         port->socket_id);
1386                         /* configure port */
1387                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1388                                                 &(port->dev_conf));
1389                         if (diag != 0) {
1390                                 if (rte_atomic16_cmpset(&(port->port_status),
1391                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1392                                         printf("Port %d can not be set back "
1393                                                         "to stopped\n", pi);
1394                                 printf("Fail to configure port %d\n", pi);
1395                                 /* try to reconfigure port next time */
1396                                 port->need_reconfig = 1;
1397                                 return -1;
1398                         }
1399                 }
1400                 if (port->need_reconfig_queues > 0) {
1401                         port->need_reconfig_queues = 0;
1402                         /* setup tx queues */
1403                         for (qi = 0; qi < nb_txq; qi++) {
1404                                 if ((numa_support) &&
1405                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1406                                         diag = rte_eth_tx_queue_setup(pi, qi,
1407                                                 nb_txd,txring_numa[pi],
1408                                                 &(port->tx_conf));
1409                                 else
1410                                         diag = rte_eth_tx_queue_setup(pi, qi,
1411                                                 nb_txd,port->socket_id,
1412                                                 &(port->tx_conf));
1413
1414                                 if (diag == 0)
1415                                         continue;
1416
1417                                 /* Fail to setup tx queue, return */
1418                                 if (rte_atomic16_cmpset(&(port->port_status),
1419                                                         RTE_PORT_HANDLING,
1420                                                         RTE_PORT_STOPPED) == 0)
1421                                         printf("Port %d can not be set back "
1422                                                         "to stopped\n", pi);
1423                                 printf("Fail to configure port %d tx queues\n", pi);
1424                                 /* try to reconfigure queues next time */
1425                                 port->need_reconfig_queues = 1;
1426                                 return -1;
1427                         }
1428                         /* setup rx queues */
1429                         for (qi = 0; qi < nb_rxq; qi++) {
1430                                 if ((numa_support) &&
1431                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1432                                         struct rte_mempool * mp =
1433                                                 mbuf_pool_find(rxring_numa[pi]);
1434                                         if (mp == NULL) {
1435                                                 printf("Failed to setup RX queue:"
1436                                                         "No mempool allocation"
1437                                                         " on the socket %d\n",
1438                                                         rxring_numa[pi]);
1439                                                 return -1;
1440                                         }
1441
1442                                         diag = rte_eth_rx_queue_setup(pi, qi,
1443                                              nb_rxd,rxring_numa[pi],
1444                                              &(port->rx_conf),mp);
1445                                 } else {
1446                                         struct rte_mempool *mp =
1447                                                 mbuf_pool_find(port->socket_id);
1448                                         if (mp == NULL) {
1449                                                 printf("Failed to setup RX queue:"
1450                                                         "No mempool allocation"
1451                                                         " on the socket %d\n",
1452                                                         port->socket_id);
1453                                                 return -1;
1454                                         }
1455                                         diag = rte_eth_rx_queue_setup(pi, qi,
1456                                              nb_rxd,port->socket_id,
1457                                              &(port->rx_conf), mp);
1458                                 }
1459                                 if (diag == 0)
1460                                         continue;
1461
1462                                 /* Fail to setup rx queue, return */
1463                                 if (rte_atomic16_cmpset(&(port->port_status),
1464                                                         RTE_PORT_HANDLING,
1465                                                         RTE_PORT_STOPPED) == 0)
1466                                         printf("Port %d can not be set back "
1467                                                         "to stopped\n", pi);
1468                                 printf("Fail to configure port %d rx queues\n", pi);
1469                                 /* try to reconfigure queues next time */
1470                                 port->need_reconfig_queues = 1;
1471                                 return -1;
1472                         }
1473                 }
1474
1475                 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1476                      event_type < RTE_ETH_EVENT_MAX;
1477                      event_type++) {
1478                         diag = rte_eth_dev_callback_register(pi,
1479                                                         event_type,
1480                                                         eth_event_callback,
1481                                                         NULL);
1482                         if (diag) {
1483                                 printf("Failed to setup even callback for event %d\n",
1484                                         event_type);
1485                                 return -1;
1486                         }
1487                 }
1488
1489                 /* start port */
1490                 if (rte_eth_dev_start(pi) < 0) {
1491                         printf("Fail to start port %d\n", pi);
1492
1493                         /* Fail to setup rx queue, return */
1494                         if (rte_atomic16_cmpset(&(port->port_status),
1495                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1496                                 printf("Port %d can not be set back to "
1497                                                         "stopped\n", pi);
1498                         continue;
1499                 }
1500
1501                 if (rte_atomic16_cmpset(&(port->port_status),
1502                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1503                         printf("Port %d can not be set into started\n", pi);
1504
1505                 rte_eth_macaddr_get(pi, &mac_addr);
1506                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1507                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1508                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1509                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1510
1511                 /* at least one port started, need checking link status */
1512                 need_check_link_status = 1;
1513         }
1514
1515         if (need_check_link_status == 1 && !no_link_check)
1516                 check_all_ports_link_status(RTE_PORT_ALL);
1517         else if (need_check_link_status == 0)
1518                 printf("Please stop the ports first\n");
1519
1520         printf("Done\n");
1521         return 0;
1522 }
1523
/*
 * Stop the given port, or every port when pid is RTE_PORT_ALL.
 * Ports that are currently forwarding or acting as bonding slaves are
 * skipped with a message.  State moves STARTED -> HANDLING -> STOPPED
 * via atomic compare-and-set.
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	/* Leaving any started state clears the DCB test configuration. */
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		/* A port still in the forwarding config must be removed first. */
		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		/* Bonding slaves are stopped through their master device. */
		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* Only a STARTED port can be claimed for stopping. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
1572
1573 void
1574 close_port(portid_t pid)
1575 {
1576         portid_t pi;
1577         struct rte_port *port;
1578
1579         if (port_id_is_invalid(pid, ENABLED_WARN))
1580                 return;
1581
1582         printf("Closing ports...\n");
1583
1584         RTE_ETH_FOREACH_DEV(pi) {
1585                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1586                         continue;
1587
1588                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1589                         printf("Please remove port %d from forwarding configuration.\n", pi);
1590                         continue;
1591                 }
1592
1593                 if (port_is_bonding_slave(pi)) {
1594                         printf("Please remove port %d from bonded device.\n", pi);
1595                         continue;
1596                 }
1597
1598                 port = &ports[pi];
1599                 if (rte_atomic16_cmpset(&(port->port_status),
1600                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1601                         printf("Port %d is already closed\n", pi);
1602                         continue;
1603                 }
1604
1605                 if (rte_atomic16_cmpset(&(port->port_status),
1606                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1607                         printf("Port %d is now not stopped\n", pi);
1608                         continue;
1609                 }
1610
1611                 if (port->flow_list)
1612                         port_flow_flush(pi);
1613                 rte_eth_dev_close(pi);
1614
1615                 if (rte_atomic16_cmpset(&(port->port_status),
1616                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1617                         printf("Port %d cannot be set to closed\n", pi);
1618         }
1619
1620         printf("Done\n");
1621 }
1622
1623 void
1624 attach_port(char *identifier)
1625 {
1626         portid_t pi = 0;
1627         unsigned int socket_id;
1628
1629         printf("Attaching a new port...\n");
1630
1631         if (identifier == NULL) {
1632                 printf("Invalid parameters are specified\n");
1633                 return;
1634         }
1635
1636         if (rte_eth_dev_attach(identifier, &pi))
1637                 return;
1638
1639         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1640         /* if socket_id is invalid, set to 0 */
1641         if (check_socket_id(socket_id) < 0)
1642                 socket_id = 0;
1643         reconfig(pi, socket_id);
1644         rte_eth_promiscuous_enable(pi);
1645
1646         nb_ports = rte_eth_dev_count();
1647
1648         ports[pi].port_status = RTE_PORT_STOPPED;
1649
1650         printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1651         printf("Done\n");
1652 }
1653
1654 void
1655 detach_port(uint8_t port_id)
1656 {
1657         char name[RTE_ETH_NAME_MAX_LEN];
1658
1659         printf("Detaching a port...\n");
1660
1661         if (!port_is_closed(port_id)) {
1662                 printf("Please close port first\n");
1663                 return;
1664         }
1665
1666         if (ports[port_id].flow_list)
1667                 port_flow_flush(port_id);
1668
1669         if (rte_eth_dev_detach(port_id, name))
1670                 return;
1671
1672         nb_ports = rte_eth_dev_count();
1673
1674         printf("Port '%s' is detached. Now total ports is %d\n",
1675                         name, nb_ports);
1676         printf("Done\n");
1677         return;
1678 }
1679
1680 void
1681 pmd_test_exit(void)
1682 {
1683         portid_t pt_id;
1684
1685         if (test_done == 0)
1686                 stop_packet_forwarding();
1687
1688         if (ports != NULL) {
1689                 no_link_check = 1;
1690                 RTE_ETH_FOREACH_DEV(pt_id) {
1691                         printf("\nShutting down port %d...\n", pt_id);
1692                         fflush(stdout);
1693                         stop_port(pt_id);
1694                         close_port(pt_id);
1695                 }
1696         }
1697         printf("\nBye...\n");
1698 }
1699
/* Signature of a PMD test menu command handler. */
typedef void (*cmd_func_t)(void);
/* One PMD test menu entry: command name and the handler it invokes. */
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

/* Number of entries in pmd_test_menu (declared elsewhere in this file). */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1707
1708 /* Check the link status of all ports in up to 9s, and print them finally */
1709 static void
1710 check_all_ports_link_status(uint32_t port_mask)
1711 {
1712 #define CHECK_INTERVAL 100 /* 100ms */
1713 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1714         uint8_t portid, count, all_ports_up, print_flag = 0;
1715         struct rte_eth_link link;
1716
1717         printf("Checking link statuses...\n");
1718         fflush(stdout);
1719         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1720                 all_ports_up = 1;
1721                 RTE_ETH_FOREACH_DEV(portid) {
1722                         if ((port_mask & (1 << portid)) == 0)
1723                                 continue;
1724                         memset(&link, 0, sizeof(link));
1725                         rte_eth_link_get_nowait(portid, &link);
1726                         /* print link status if flag set */
1727                         if (print_flag == 1) {
1728                                 if (link.link_status)
1729                                         printf("Port %d Link Up - speed %u "
1730                                                 "Mbps - %s\n", (uint8_t)portid,
1731                                                 (unsigned)link.link_speed,
1732                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1733                                         ("full-duplex") : ("half-duplex\n"));
1734                                 else
1735                                         printf("Port %d Link Down\n",
1736                                                 (uint8_t)portid);
1737                                 continue;
1738                         }
1739                         /* clear all_ports_up flag if any link down */
1740                         if (link.link_status == ETH_LINK_DOWN) {
1741                                 all_ports_up = 0;
1742                                 break;
1743                         }
1744                 }
1745                 /* after finally printing all link status, get out */
1746                 if (print_flag == 1)
1747                         break;
1748
1749                 if (all_ports_up == 0) {
1750                         fflush(stdout);
1751                         rte_delay_ms(CHECK_INTERVAL);
1752                 }
1753
1754                 /* set the print_flag if all ports up or timeout */
1755                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1756                         print_flag = 1;
1757                 }
1758
1759                 if (lsc_interrupt)
1760                         break;
1761         }
1762 }
1763
/* Deferred handler for a device-removal interrupt: stops, closes and
 * detaches the port that raised RTE_ETH_EVENT_INTR_RMV.
 * arg carries the port id cast through the void pointer (scheduled by
 * eth_event_callback() via rte_eal_alarm_set()).
 */
static void
rmv_event_callback(void *arg)
{
	struct rte_eth_dev *dev;
	struct rte_devargs *da;
	char name[32] = "";
	uint8_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];
	da = dev->device->devargs;

	stop_port(port_id);
	close_port(port_id);
	/* Rebuild the device name required by rte_eal_dev_detach():
	 * driver name for vdevs, PCI address string for PCI devices.
	 */
	if (da->type == RTE_DEVTYPE_VIRTUAL)
		snprintf(name, sizeof(name), "%s", da->virt.drv_name);
	else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
		rte_eal_pci_device_name(&da->pci.addr, name, sizeof(name));
	printf("removing device %s\n", name);
	rte_eal_dev_detach(name);
	/* Mark the ethdev slot free so the port id can be reused. */
	dev->state = RTE_ETH_DEV_UNUSED;
}
1786
1787 /* This function is used by the interrupt thread */
1788 static void
1789 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
1790 {
1791         static const char * const event_desc[] = {
1792                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1793                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1794                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1795                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1796                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1797                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1798                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1799                 [RTE_ETH_EVENT_MAX] = NULL,
1800         };
1801
1802         RTE_SET_USED(param);
1803
1804         if (type >= RTE_ETH_EVENT_MAX) {
1805                 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1806                         port_id, __func__, type);
1807                 fflush(stderr);
1808         } else {
1809                 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1810                         event_desc[type]);
1811                 fflush(stdout);
1812         }
1813
1814         switch (type) {
1815         case RTE_ETH_EVENT_INTR_RMV:
1816                 if (rte_eal_alarm_set(100000,
1817                                 rmv_event_callback, (void *)(intptr_t)port_id))
1818                         fprintf(stderr, "Could not set up deferred device removal\n");
1819                 break;
1820         default:
1821                 break;
1822         }
1823 }
1824
1825 static int
1826 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1827 {
1828         uint16_t i;
1829         int diag;
1830         uint8_t mapping_found = 0;
1831
1832         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1833                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1834                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1835                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1836                                         tx_queue_stats_mappings[i].queue_id,
1837                                         tx_queue_stats_mappings[i].stats_counter_id);
1838                         if (diag != 0)
1839                                 return diag;
1840                         mapping_found = 1;
1841                 }
1842         }
1843         if (mapping_found)
1844                 port->tx_queue_stats_mapping_enabled = 1;
1845         return 0;
1846 }
1847
1848 static int
1849 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1850 {
1851         uint16_t i;
1852         int diag;
1853         uint8_t mapping_found = 0;
1854
1855         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1856                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1857                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1858                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1859                                         rx_queue_stats_mappings[i].queue_id,
1860                                         rx_queue_stats_mappings[i].stats_counter_id);
1861                         if (diag != 0)
1862                                 return diag;
1863                         mapping_found = 1;
1864                 }
1865         }
1866         if (mapping_found)
1867                 port->rx_queue_stats_mapping_enabled = 1;
1868         return 0;
1869 }
1870
1871 static void
1872 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1873 {
1874         int diag = 0;
1875
1876         diag = set_tx_queue_stats_mapping_registers(pi, port);
1877         if (diag != 0) {
1878                 if (diag == -ENOTSUP) {
1879                         port->tx_queue_stats_mapping_enabled = 0;
1880                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1881                 }
1882                 else
1883                         rte_exit(EXIT_FAILURE,
1884                                         "set_tx_queue_stats_mapping_registers "
1885                                         "failed for port id=%d diag=%d\n",
1886                                         pi, diag);
1887         }
1888
1889         diag = set_rx_queue_stats_mapping_registers(pi, port);
1890         if (diag != 0) {
1891                 if (diag == -ENOTSUP) {
1892                         port->rx_queue_stats_mapping_enabled = 0;
1893                         printf("RX queue stats mapping not supported port id=%d\n", pi);
1894                 }
1895                 else
1896                         rte_exit(EXIT_FAILURE,
1897                                         "set_rx_queue_stats_mapping_registers "
1898                                         "failed for port id=%d diag=%d\n",
1899                                         pi, diag);
1900         }
1901 }
1902
/* Initialize a port's RX/TX queue configuration from the device
 * defaults, then overlay any thresholds the user supplied on the
 * command line (RTE_PMD_PARAM_UNSET marks "not given").
 */
static void
rxtx_port_config(struct rte_port *port)
{
	/* Start from the PMD-recommended defaults. */
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}
1943
1944 void
1945 init_port_config(void)
1946 {
1947         portid_t pid;
1948         struct rte_port *port;
1949
1950         RTE_ETH_FOREACH_DEV(pid) {
1951                 port = &ports[pid];
1952                 port->dev_conf.rxmode = rx_mode;
1953                 port->dev_conf.fdir_conf = fdir_conf;
1954                 if (nb_rxq > 1) {
1955                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1956                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1957                 } else {
1958                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1959                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1960                 }
1961
1962                 if (port->dcb_flag == 0) {
1963                         if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1964                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1965                         else
1966                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1967                 }
1968
1969                 rxtx_port_config(port);
1970
1971                 rte_eth_macaddr_get(pid, &port->eth_addr);
1972
1973                 map_port_queue_stats_mapping_registers(pid, port);
1974 #ifdef RTE_NIC_BYPASS
1975                 rte_eth_dev_bypass_init(pid);
1976 #endif
1977
1978                 if (lsc_interrupt &&
1979                     (rte_eth_devices[pid].data->dev_flags &
1980                      RTE_ETH_DEV_INTR_LSC))
1981                         port->dev_conf.intr_conf.lsc = 1;
1982                 if (rmv_interrupt &&
1983                     (rte_eth_devices[pid].data->dev_flags &
1984                      RTE_ETH_DEV_INTR_RMV))
1985                         port->dev_conf.intr_conf.rmv = 1;
1986         }
1987 }
1988
1989 void set_port_slave_flag(portid_t slave_pid)
1990 {
1991         struct rte_port *port;
1992
1993         port = &ports[slave_pid];
1994         port->slave_flag = 1;
1995 }
1996
1997 void clear_port_slave_flag(portid_t slave_pid)
1998 {
1999         struct rte_port *port;
2000
2001         port = &ports[slave_pid];
2002         port->slave_flag = 0;
2003 }
2004
2005 uint8_t port_is_bonding_slave(portid_t slave_pid)
2006 {
2007         struct rte_port *port;
2008
2009         port = &ports[slave_pid];
2010         return port->slave_flag;
2011 }
2012
/* VLAN tags used to fill the VMDQ+DCB pool map in get_eth_dcb_conf()
 * and installed as RX VLAN filters by init_port_dcb_config().
 */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
2019
/* Fill eth_conf with a DCB configuration.
 *
 * dcb_mode selects DCB with virtualization (VMDQ+DCB) or plain DCB;
 * num_tcs is the number of traffic classes (4 or 8); pfc_en enables
 * priority flow control capability.  Always returns 0.
 */
static  int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
                 enum dcb_mode_enable dcb_mode,
                 enum rte_eth_nb_tcs num_tcs,
                 uint8_t pfc_en)
{
        uint8_t i;

        /*
         * Builds up the correct configuration for dcb+vt based on the vlan tags array
         * given above, and the number of traffic classes available for use.
         */
        if (dcb_mode == DCB_VT_ENABLED) {
                struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
                                &eth_conf->rx_adv_conf.vmdq_dcb_conf;
                struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
                                &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

                /* VMDQ+DCB RX and TX configurations */
                vmdq_rx_conf->enable_default_pool = 0;
                vmdq_rx_conf->default_pool = 0;
                /* 4 TCs leave room for 32 pools, 8 TCs for 16. */
                vmdq_rx_conf->nb_queue_pools =
                        (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
                vmdq_tx_conf->nb_queue_pools =
                        (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

                /* Map each VLAN tag round-robin onto the pools. */
                vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
                for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
                        vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
                        vmdq_rx_conf->pool_map[i].pools =
                                1 << (i % vmdq_rx_conf->nb_queue_pools);
                }
                /* Identity mapping: user priority i -> traffic class i. */
                for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
                        vmdq_rx_conf->dcb_tc[i] = i;
                        vmdq_tx_conf->dcb_tc[i] = i;
                }

                /* set DCB mode of RX and TX of multiple queues */
                eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
                eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
        } else {
                struct rte_eth_dcb_rx_conf *rx_conf =
                                &eth_conf->rx_adv_conf.dcb_rx_conf;
                struct rte_eth_dcb_tx_conf *tx_conf =
                                &eth_conf->tx_adv_conf.dcb_tx_conf;

                rx_conf->nb_tcs = num_tcs;
                tx_conf->nb_tcs = num_tcs;

                /* Spread user priorities evenly over the traffic classes. */
                for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
                        rx_conf->dcb_tc[i] = i % num_tcs;
                        tx_conf->dcb_tc[i] = i % num_tcs;
                }
                /* Plain DCB combines with RSS on RX. */
                eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
                eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
                eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
        }

        if (pfc_en)
                eth_conf->dcb_capability_en =
                                ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
        else
                eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

        return 0;
}
2086
/* Configure one port for DCB operation.
 *
 * Builds the DCB rte_eth_conf via get_eth_dcb_conf(), applies it, then
 * derives the per-port RX/TX queue counts and installs VLAN filters
 * for every entry of vlan_tags[].  Returns 0 on success, negative on
 * configuration failure.  Note: updates the global nb_rxq/nb_txq, so
 * all testpmd ports are assumed to share the same DCB capability.
 */
int
init_port_dcb_config(portid_t pid,
                     enum dcb_mode_enable dcb_mode,
                     enum rte_eth_nb_tcs num_tcs,
                     uint8_t pfc_en)
{
        struct rte_eth_conf port_conf;
        struct rte_port *rte_port;
        int retval;
        uint16_t i;

        rte_port = &ports[pid];

        memset(&port_conf, 0, sizeof(struct rte_eth_conf));
        /* Enter DCB configuration status */
        dcb_config = 1;

        /*set configuration of DCB in vt mode and DCB in non-vt mode*/
        retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
        if (retval < 0)
                return retval;
        port_conf.rxmode.hw_vlan_filter = 1;

        /**
         * Write the configuration into the device.
         * Set the numbers of RX & TX queues to 0, so
         * the RX & TX queues will not be setup.
         */
        (void)rte_eth_dev_configure(pid, 0, 0, &port_conf);

        rte_eth_dev_info_get(pid, &rte_port->dev_info);

        /* If dev_info.vmdq_pool_base is greater than 0,
         * the queue id of vmdq pools is started after pf queues.
         */
        if (dcb_mode == DCB_VT_ENABLED &&
            rte_port->dev_info.vmdq_pool_base > 0) {
                printf("VMDQ_DCB multi-queue mode is nonsensical"
                        " for port %d.", pid);
                return -1;
        }

        /* Assume the ports in testpmd have the same dcb capability
         * and has the same number of rxq and txq in dcb mode
         */
        if (dcb_mode == DCB_VT_ENABLED) {
                if (rte_port->dev_info.max_vfs > 0) {
                        /* With VFs present, the PF only owns the queues the
                         * driver already reserved for it.
                         */
                        nb_rxq = rte_port->dev_info.nb_rx_queues;
                        nb_txq = rte_port->dev_info.nb_tx_queues;
                } else {
                        nb_rxq = rte_port->dev_info.max_rx_queues;
                        nb_txq = rte_port->dev_info.max_tx_queues;
                }
        } else {
                /*if vt is disabled, use all pf queues */
                if (rte_port->dev_info.vmdq_pool_base == 0) {
                        nb_rxq = rte_port->dev_info.max_rx_queues;
                        nb_txq = rte_port->dev_info.max_tx_queues;
                } else {
                        /* One queue per traffic class. */
                        nb_rxq = (queueid_t)num_tcs;
                        nb_txq = (queueid_t)num_tcs;

                }
        }
        rx_free_thresh = 64;

        memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

        rxtx_port_config(rte_port);
        /* VLAN filter */
        rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
        for (i = 0; i < RTE_DIM(vlan_tags); i++)
                rx_vft_set(pid, vlan_tags[i], 1);

        rte_eth_macaddr_get(pid, &rte_port->eth_addr);
        map_port_queue_stats_mapping_registers(pid, rte_port);

        rte_port->dcb_flag = 1;

        return 0;
}
2168
2169 static void
2170 init_port(void)
2171 {
2172         /* Configuration of Ethernet ports. */
2173         ports = rte_zmalloc("testpmd: ports",
2174                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2175                             RTE_CACHE_LINE_SIZE);
2176         if (ports == NULL) {
2177                 rte_exit(EXIT_FAILURE,
2178                                 "rte_zmalloc(%d struct rte_port) failed\n",
2179                                 RTE_MAX_ETHPORTS);
2180         }
2181 }
2182
/* Stop forwarding, shut all ports down, and leave the prompt loop. */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
2189
/* SIGINT/SIGTERM handler: tear testpmd down, then re-raise the signal
 * with the default disposition so the process exits with the
 * conventional signal status.
 */
static void
signal_handler(int signum)
{
	if (signum != SIGINT && signum != SIGTERM)
		return;

	printf("\nSignal %d received, preparing to exit...\n",
			signum);
#ifdef RTE_LIBRTE_PDUMP
	/* uninitialize packet capture framework */
	rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	rte_latencystats_uninit();
#endif
	force_quit();
	/* exit with the expected status */
	signal(signum, SIG_DFL);
	kill(getpid(), signum);
}
2209
/* testpmd entry point: initialize EAL and all probed ports, apply
 * command-line configuration, then either run the interactive prompt
 * or start packet forwarding until the user presses enter.
 */
int
main(int argc, char** argv)
{
	int  diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	/* rte_eal_init() returns the number of argv entries it consumed. */
	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate stats disabled by default */
	bitrate_enabled = 0;

	/* Skip the EAL arguments and parse the testpmd-specific ones. */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif


#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		/* Non-interactive mode: forward until enter is pressed. */
		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}