dpdk.git / app / test-pmd / testpmd.c (blob at commit 2ab50915cb9e11d462a8cda900d190d3d0d0f56e)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
68 #include <rte_mbuf.h>
69 #include <rte_interrupts.h>
70 #include <rte_pci.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
73 #include <rte_dev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78 #ifdef RTE_LIBRTE_PDUMP
79 #include <rte_pdump.h>
80 #endif
81 #include <rte_flow.h>
82 #include <rte_metrics.h>
83 #ifdef RTE_LIBRTE_BITRATE
84 #include <rte_bitrate.h>
85 #endif
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
89 #endif
90
91 #include "testpmd.h"
92
93 uint16_t verbose_level = 0; /**< Silent by default. */
94
95 /* Use the master core for the interactive command line? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
98
99 /*
100  * NUMA support configuration.
101  * When set, NUMA support attempts to dispatch the allocation of the
102  * RX and TX descriptor rings, and of the DMA memory buffers (mbufs)
103  * of the probed ports, among the CPU sockets the ports are attached to.
104  * Otherwise, all memory is allocated from CPU socket 0.
105  */
106 uint8_t numa_support = 0; /**< No numa support by default */
107
108 /*
109  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
110  * not configured.
111  */
112 uint8_t socket_num = UMA_NO_CONFIG;
113
114 /*
115  * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
116  */
117 uint8_t mp_anon = 0;
118
119 /*
120  * Record the Ethernet address of peer target ports to which packets are
121  * forwarded.
122  * Must be instantiated with the ethernet addresses of peer traffic generator
123  * ports.
124  */
125 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
126 portid_t nb_peer_eth_addrs = 0;
127
128 /*
129  * Probed Target Environment.
130  */
131 struct rte_port *ports;        /**< For all probed ethernet ports. */
132 portid_t nb_ports;             /**< Number of probed ethernet ports. */
133 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
134 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
135
136 /*
137  * Test Forwarding Configuration.
138  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
139  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
140  */
141 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
142 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
143 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
144 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
145
146 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
147 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
148
149 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
150 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
151
152 /*
153  * Forwarding engines.
154  */
155 struct fwd_engine * fwd_engines[] = {
156         &io_fwd_engine,
157         &mac_fwd_engine,
158         &mac_swap_engine,
159         &flow_gen_engine,
160         &rx_only_engine,
161         &tx_only_engine,
162         &csum_fwd_engine,
163         &icmp_echo_engine,
164 #ifdef RTE_LIBRTE_IEEE1588
165         &ieee1588_fwd_engine,
166 #endif
167         NULL,
168 };
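/*
 * Illustrative note (not upstream testpmd documentation): the NULL-terminated
 * array above is searched by fwd_mode_name when the forwarding mode is
 * changed, e.g. from the interactive prompt:
 *
 *   testpmd> set fwd mac
 *   testpmd> set fwd csum retry
 *
 * cur_fwd_eng below is then pointed at the matching entry.
 */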
169
170 struct fwd_config cur_fwd_config;
171 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
172 uint32_t retry_enabled;
173 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
174 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
175
176 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
177 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
178                                       * specified on command-line. */
179
180 /*
181  * Configuration of packet segments used by the "txonly" processing engine.
182  */
183 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
184 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
185         TXONLY_DEF_PACKET_LEN,
186 };
187 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
188
189 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
190 /**< Split policy for packets to TX. */
191
192 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
193 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
194
195 /* Whether the current configuration is in DCB mode; 0 means it is not. */
196 uint8_t dcb_config = 0;
197
198 /* Whether DCB testing is in progress. */
199 uint8_t dcb_test = 0;
200
201 /*
202  * Configurable number of RX/TX queues.
203  */
204 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
205 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
206
207 /*
208  * Configurable number of RX/TX ring descriptors.
209  */
210 #define RTE_TEST_RX_DESC_DEFAULT 128
211 #define RTE_TEST_TX_DESC_DEFAULT 512
212 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
213 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
214
215 #define RTE_PMD_PARAM_UNSET -1
216 /*
217  * Configurable values of RX and TX ring threshold registers.
218  */
219
220 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
221 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
222 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
223
224 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
225 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
226 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
227
228 /*
229  * Configurable value of RX free threshold.
230  */
231 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
232
233 /*
234  * Configurable value of RX drop enable.
235  */
236 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
237
238 /*
239  * Configurable value of TX free threshold.
240  */
241 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
242
243 /*
244  * Configurable value of TX RS bit threshold.
245  */
246 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
247
248 /*
249  * Configurable value of TX queue flags.
250  */
251 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
252
253 /*
254  * Receive Side Scaling (RSS) configuration.
255  */
256 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
257
258 /*
259  * Port topology configuration
260  */
261 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
262
263 /*
264  * Avoid flushing all the RX queues before starting forwarding.
265  */
266 uint8_t no_flush_rx = 0; /* flush by default */
267
268 /*
269  * Avoid checking the link status when starting/stopping a port.
270  */
271 uint8_t no_link_check = 0; /* check by default */
272
273 /*
274  * NIC bypass mode configuration options.
275  */
276 #ifdef RTE_NIC_BYPASS
277
278 /* The NIC bypass watchdog timeout. */
279 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
280
281 #endif
282
283 #ifdef RTE_LIBRTE_LATENCY_STATS
284
285 /*
286  * Set when latency stats are enabled on the command line.
287  */
288 uint8_t latencystats_enabled;
289
290 /*
291  * Lcore ID that serves latency statistics.
292  */
293 lcoreid_t latencystats_lcore_id = -1;
294
295 #endif
296
297 /*
298  * Ethernet device configuration.
299  */
300 struct rte_eth_rxmode rx_mode = {
301         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
302         .split_hdr_size = 0,
303         .header_split   = 0, /**< Header Split disabled. */
304         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
305         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
306         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
307         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
308         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
309         .hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
310 };
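/*
 * A minimal sketch of how these RX-mode defaults are typically overridden
 * from the testpmd command line (flag names assumed; see the option parser
 * in parameters.c of this tree):
 *
 *   testpmd -- --max-pkt-len=9000 --enable-rx-cksum --disable-hw-vlan
 *
 * which maps onto max_rx_pkt_len, hw_ip_checksum and the hw_vlan_* fields
 * of rx_mode above.
 */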
311
312 struct rte_fdir_conf fdir_conf = {
313         .mode = RTE_FDIR_MODE_NONE,
314         .pballoc = RTE_FDIR_PBALLOC_64K,
315         .status = RTE_FDIR_REPORT_STATUS,
316         .mask = {
317                 .vlan_tci_mask = 0x0,
318                 .ipv4_mask     = {
319                         .src_ip = 0xFFFFFFFF,
320                         .dst_ip = 0xFFFFFFFF,
321                 },
322                 .ipv6_mask     = {
323                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
324                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
325                 },
326                 .src_port_mask = 0xFFFF,
327                 .dst_port_mask = 0xFFFF,
328                 .mac_addr_byte_mask = 0xFF,
329                 .tunnel_type_mask = 1,
330                 .tunnel_id_mask = 0xFFFFFFFF,
331         },
332         .drop_queue = 127,
333 };
334
335 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
336
337 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
338 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
339
340 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
341 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
342
343 uint16_t nb_tx_queue_stats_mappings = 0;
344 uint16_t nb_rx_queue_stats_mappings = 0;
345
346 unsigned max_socket = 0;
347
348 /* Bitrate statistics */
349 struct rte_stats_bitrates *bitrate_data;
350
351 /* Forward function declarations */
352 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
353 static void check_all_ports_link_status(uint32_t port_mask);
354
355 /*
356  * Check if all the ports are started.
357  * If yes, return positive value. If not, return zero.
358  */
359 static int all_ports_started(void);
360
361 /*
362  * Setup default configuration.
363  */
364 static void
365 set_default_fwd_lcores_config(void)
366 {
367         unsigned int i;
368         unsigned int nb_lc;
369         unsigned int sock_num;
370
371         nb_lc = 0;
372         for (i = 0; i < RTE_MAX_LCORE; i++) {
373                 sock_num = rte_lcore_to_socket_id(i) + 1;
374                 if (sock_num > max_socket) {
375                         if (sock_num > RTE_MAX_NUMA_NODES)
376                                 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
377                         max_socket = sock_num;
378                 }
379                 if (!rte_lcore_is_enabled(i))
380                         continue;
381                 if (i == rte_get_master_lcore())
382                         continue;
383                 fwd_lcores_cpuids[nb_lc++] = i;
384         }
385         nb_lcores = (lcoreid_t) nb_lc;
386         nb_cfg_lcores = nb_lcores;
387         nb_fwd_lcores = 1;
388 }
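/*
 * Example (illustrative): with "testpmd -l 0-3 -n 4 -- -i", lcore 0 is the
 * master servicing the command line, so fwd_lcores_cpuids[] becomes
 * {1, 2, 3} and nb_lcores = 3, while only one core forwards by default
 * (nb_fwd_lcores = 1) until "set nbcore <n>" changes it.
 */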
389
390 static void
391 set_def_peer_eth_addrs(void)
392 {
393         portid_t i;
394
395         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
396                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
397                 peer_eth_addrs[i].addr_bytes[5] = i;
398         }
399 }
400
401 static void
402 set_default_fwd_ports_config(void)
403 {
404         portid_t pt_id;
405
406         for (pt_id = 0; pt_id < nb_ports; pt_id++)
407                 fwd_ports_ids[pt_id] = pt_id;
408
409         nb_cfg_ports = nb_ports;
410         nb_fwd_ports = nb_ports;
411 }
412
413 void
414 set_def_fwd_config(void)
415 {
416         set_default_fwd_lcores_config();
417         set_def_peer_eth_addrs();
418         set_default_fwd_ports_config();
419 }
420
421 /*
422  * Configuration initialisation done once at init time.
423  */
424 static void
425 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
426                  unsigned int socket_id)
427 {
428         char pool_name[RTE_MEMPOOL_NAMESIZE];
429         struct rte_mempool *rte_mp = NULL;
430         uint32_t mb_size;
431
432         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
433         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
434
435         RTE_LOG(INFO, USER1,
436                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
437                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
438
439 #ifdef RTE_LIBRTE_PMD_XENVIRT
440         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
441                 (unsigned) mb_mempool_cache,
442                 sizeof(struct rte_pktmbuf_pool_private),
443                 rte_pktmbuf_pool_init, NULL,
444                 rte_pktmbuf_init, NULL,
445                 socket_id, 0);
446 #endif
447
448         /* If the Xen allocation above failed or was skipped, fall back to normal allocation. */
449         if (rte_mp == NULL) {
450                 if (mp_anon != 0) {
451                         rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
452                                 mb_size, (unsigned) mb_mempool_cache,
453                                 sizeof(struct rte_pktmbuf_pool_private),
454                                 socket_id, 0);
455                         if (rte_mp == NULL)
456                                 goto err;
457
458                         if (rte_mempool_populate_anon(rte_mp) == 0) {
459                                 rte_mempool_free(rte_mp);
460                                 rte_mp = NULL;
461                                 goto err;
462                         }
463                         rte_pktmbuf_pool_init(rte_mp, NULL);
464                         rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
465                 } else {
466                         /* wrapper to rte_mempool_create() */
467                         rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
468                                 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
469                 }
470         }
471
472 err:
473         if (rte_mp == NULL) {
474                 rte_exit(EXIT_FAILURE,
475                         "Creation of mbuf pool for socket %u failed: %s\n",
476                         socket_id, rte_strerror(rte_errno));
477         } else if (verbose_level > 0) {
478                 rte_mempool_dump(stdout, rte_mp);
479         }
480 }
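/*
 * Usage sketch (assumptions noted inline): init_config() calls
 * mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, socket) once per
 * socket; the pool name comes from mbuf_poolname_build() (e.g.
 * "mbuf_pool_socket_0"), creation failure aborts testpmd via rte_exit(),
 * and with verbose_level > 0 the new pool is dumped to stdout.
 */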
481
482 /*
483  * Check whether the given socket id is valid in NUMA mode;
484  * return 0 if valid, -1 otherwise.
485  */
486 static int
487 check_socket_id(const unsigned int socket_id)
488 {
489         static int warning_once = 0;
490
491         if (socket_id >= max_socket) {
492                 if (!warning_once && numa_support)
493                         printf("Warning: NUMA should be configured manually by"
494                                " using --port-numa-config and"
495                                " --ring-numa-config parameters along with"
496                                " --numa.\n");
497                 warning_once = 1;
498                 return -1;
499         }
500         return 0;
501 }
502
503 static void
504 init_config(void)
505 {
506         portid_t pid;
507         struct rte_port *port;
508         struct rte_mempool *mbp;
509         unsigned int nb_mbuf_per_pool;
510         lcoreid_t  lc_id;
511         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
512
513         memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
514         /* Configuration of logical cores. */
515         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
516                                 sizeof(struct fwd_lcore *) * nb_lcores,
517                                 RTE_CACHE_LINE_SIZE);
518         if (fwd_lcores == NULL) {
519                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
520                                                         "failed\n", nb_lcores);
521         }
522         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
523                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
524                                                sizeof(struct fwd_lcore),
525                                                RTE_CACHE_LINE_SIZE);
526                 if (fwd_lcores[lc_id] == NULL) {
527                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
528                                                                 "failed\n");
529                 }
530                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
531         }
532
533         /*
534          * Create mbuf pools.
535          * If NUMA support is disabled, create a single pool of mbufs in
536          * socket 0 memory by default.
537          * Otherwise, create one pool of mbufs on each detected CPU socket.
538          *
539          * Size the pools for the maximum values of nb_rxd and nb_txd, so
540          * that nb_rxd and nb_txd can be reconfigured at run time.
541          */
542         if (param_total_num_mbufs)
543                 nb_mbuf_per_pool = param_total_num_mbufs;
544         else {
545                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
546                                 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
547
548                 if (!numa_support)
549                         nb_mbuf_per_pool =
550                                 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
551         }
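        /*
         * Worked example (assuming the usual testpmd.h defaults of
         * RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
         * MAX_PKT_BURST = 512, mb_mempool_cache = 250 and 4 lcores):
         * nb_mbuf_per_pool = 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs,
         * multiplied by RTE_MAX_ETHPORTS below in the non-NUMA case.
         */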
552
553         if (!numa_support) {
554                 if (socket_num == UMA_NO_CONFIG)
555                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
556                 else
557                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
558                                                  socket_num);
559         }
560
561         RTE_ETH_FOREACH_DEV(pid) {
562                 port = &ports[pid];
563                 rte_eth_dev_info_get(pid, &port->dev_info);
564
565                 if (numa_support) {
566                         if (port_numa[pid] != NUMA_NO_CONFIG)
567                                 port_per_socket[port_numa[pid]]++;
568                         else {
569                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
570
571                                 /* if socket_id is invalid, set to 0 */
572                                 if (check_socket_id(socket_id) < 0)
573                                         socket_id = 0;
574                                 port_per_socket[socket_id]++;
575                         }
576                 }
577
578                 /* set flag to initialize port/queue */
579                 port->need_reconfig = 1;
580                 port->need_reconfig_queues = 1;
581         }
582
583         if (numa_support) {
584                 uint8_t i;
585                 unsigned int nb_mbuf;
586
587                 if (param_total_num_mbufs)
588                         nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
589
590                 for (i = 0; i < max_socket; i++) {
591                         nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
592                         if (nb_mbuf)
593                                 mbuf_pool_create(mbuf_data_size,
594                                                 nb_mbuf,i);
595                 }
596         }
597         init_port_config();
598
599         /*
600          * Record which mbuf pool each logical core should use, if needed.
601          */
602         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
603                 mbp = mbuf_pool_find(
604                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
605
606                 if (mbp == NULL)
607                         mbp = mbuf_pool_find(0);
608                 fwd_lcores[lc_id]->mbp = mbp;
609         }
610
611         /* Configuration of packet forwarding streams. */
612         if (init_fwd_streams() < 0)
613                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
614
615         fwd_config_setup();
616 }
617
618
619 void
620 reconfig(portid_t new_port_id, unsigned socket_id)
621 {
622         struct rte_port *port;
623
624         /* Reconfiguration of Ethernet ports. */
625         port = &ports[new_port_id];
626         rte_eth_dev_info_get(new_port_id, &port->dev_info);
627
628         /* set flag to initialize port/queue */
629         port->need_reconfig = 1;
630         port->need_reconfig_queues = 1;
631         port->socket_id = socket_id;
632
633         init_port_config();
634 }
635
636
637 int
638 init_fwd_streams(void)
639 {
640         portid_t pid;
641         struct rte_port *port;
642         streamid_t sm_id, nb_fwd_streams_new;
643         queueid_t q;
644
645         /* Set the socket id of each port according to the NUMA configuration. */
646         RTE_ETH_FOREACH_DEV(pid) {
647                 port = &ports[pid];
648                 if (nb_rxq > port->dev_info.max_rx_queues) {
649                         printf("Fail: nb_rxq(%d) is greater than "
650                                 "max_rx_queues(%d)\n", nb_rxq,
651                                 port->dev_info.max_rx_queues);
652                         return -1;
653                 }
654                 if (nb_txq > port->dev_info.max_tx_queues) {
655                         printf("Fail: nb_txq(%d) is greater than "
656                                 "max_tx_queues(%d)\n", nb_txq,
657                                 port->dev_info.max_tx_queues);
658                         return -1;
659                 }
660                 if (numa_support) {
661                         if (port_numa[pid] != NUMA_NO_CONFIG)
662                                 port->socket_id = port_numa[pid];
663                         else {
664                                 port->socket_id = rte_eth_dev_socket_id(pid);
665
666                                 /* if socket_id is invalid, set to 0 */
667                                 if (check_socket_id(port->socket_id) < 0)
668                                         port->socket_id = 0;
669                         }
670                 }
671                 else {
672                         if (socket_num == UMA_NO_CONFIG)
673                                 port->socket_id = 0;
674                         else
675                                 port->socket_id = socket_num;
676                 }
677         }
678
679         q = RTE_MAX(nb_rxq, nb_txq);
680         if (q == 0) {
681                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
682                 return -1;
683         }
684         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
685         if (nb_fwd_streams_new == nb_fwd_streams)
686                 return 0;
687         /* clear the old */
688         if (fwd_streams != NULL) {
689                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
690                         if (fwd_streams[sm_id] == NULL)
691                                 continue;
692                         rte_free(fwd_streams[sm_id]);
693                         fwd_streams[sm_id] = NULL;
694                 }
695                 rte_free(fwd_streams);
696                 fwd_streams = NULL;
697         }
698
699         /* init new */
700         nb_fwd_streams = nb_fwd_streams_new;
701         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
702                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
703         if (fwd_streams == NULL)
704                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
705                                                 "failed\n", nb_fwd_streams);
706
707         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
708                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
709                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
710                 if (fwd_streams[sm_id] == NULL)
711                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
712                                                                 " failed\n");
713         }
714
715         return 0;
716 }
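/*
 * Example (illustrative): with 2 probed ports and nb_rxq = nb_txq = 4,
 * q = 4 and nb_fwd_streams = 8; fwd_config_setup() later binds each
 * allocated fwd_stream to a (port, queue) pair according to the topology.
 */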
717
718 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
719 static void
720 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
721 {
722         unsigned int total_burst;
723         unsigned int nb_burst;
724         unsigned int burst_stats[3];
725         uint16_t pktnb_stats[3];
726         uint16_t nb_pkt;
727         int burst_percent[3];
728
729         /*
730          * First compute the total number of packet bursts and the
731          * two highest numbers of bursts of the same number of packets.
732          */
733         total_burst = 0;
734         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
735         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
736         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
737                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
738                 if (nb_burst == 0)
739                         continue;
740                 total_burst += nb_burst;
741                 if (nb_burst > burst_stats[0]) {
742                         burst_stats[1] = burst_stats[0];
743                         pktnb_stats[1] = pktnb_stats[0];
744                         burst_stats[0] = nb_burst;
745                         pktnb_stats[0] = nb_pkt;
746                 }
747         }
748         if (total_burst == 0)
749                 return;
750         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
751         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
752                burst_percent[0], (int) pktnb_stats[0]);
753         if (burst_stats[0] == total_burst) {
754                 printf("]\n");
755                 return;
756         }
757         if (burst_stats[0] + burst_stats[1] == total_burst) {
758                 printf(" + %d%% of %d pkts]\n",
759                        100 - burst_percent[0], pktnb_stats[1]);
760                 return;
761         }
762         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
763         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
764         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
765                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
766                 return;
767         }
768         printf(" + %d%% of %d pkts + %d%% of others]\n",
769                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
770 }
771 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
772
773 static void
774 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
775 {
776         struct rte_port *port;
777         uint8_t i;
778
779         static const char *fwd_stats_border = "----------------------";
780
781         port = &ports[port_id];
782         printf("\n  %s Forward statistics for port %-2d %s\n",
783                fwd_stats_border, port_id, fwd_stats_border);
784
785         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
786                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
787                        "%-"PRIu64"\n",
788                        stats->ipackets, stats->imissed,
789                        (uint64_t) (stats->ipackets + stats->imissed));
790
791                 if (cur_fwd_eng == &csum_fwd_engine)
792                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
793                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
794                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
795                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
796                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
797                 }
798
799                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
800                        "%-"PRIu64"\n",
801                        stats->opackets, port->tx_dropped,
802                        (uint64_t) (stats->opackets + port->tx_dropped));
803         }
804         else {
805                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
806                        "%14"PRIu64"\n",
807                        stats->ipackets, stats->imissed,
808                        (uint64_t) (stats->ipackets + stats->imissed));
809
810                 if (cur_fwd_eng == &csum_fwd_engine)
811                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
812                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
813                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
814                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
815                         printf("  RX-nombufs:             %14"PRIu64"\n",
816                                stats->rx_nombuf);
817                 }
818
819                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
820                        "%14"PRIu64"\n",
821                        stats->opackets, port->tx_dropped,
822                        (uint64_t) (stats->opackets + port->tx_dropped));
823         }
824
825 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
826         if (port->rx_stream)
827                 pkt_burst_stats_display("RX",
828                         &port->rx_stream->rx_burst_stats);
829         if (port->tx_stream)
830                 pkt_burst_stats_display("TX",
831                         &port->tx_stream->tx_burst_stats);
832 #endif
833
834         if (port->rx_queue_stats_mapping_enabled) {
835                 printf("\n");
836                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
837                         printf("  Stats reg %2d RX-packets:%14"PRIu64
838                                "     RX-errors:%14"PRIu64
839                                "    RX-bytes:%14"PRIu64"\n",
840                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
841                 }
842                 printf("\n");
843         }
844         if (port->tx_queue_stats_mapping_enabled) {
845                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
846                         printf("  Stats reg %2d TX-packets:%14"PRIu64
847                                "                                 TX-bytes:%14"PRIu64"\n",
848                                i, stats->q_opackets[i], stats->q_obytes[i]);
849                 }
850         }
851
852         printf("  %s--------------------------------%s\n",
853                fwd_stats_border, fwd_stats_border);
854 }
855
856 static void
857 fwd_stream_stats_display(streamid_t stream_id)
858 {
859         struct fwd_stream *fs;
860         static const char *fwd_top_stats_border = "-------";
861
862         fs = fwd_streams[stream_id];
863         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
864             (fs->fwd_dropped == 0))
865                 return;
866         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
867                "TX Port=%2d/Queue=%2d %s\n",
868                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
869                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
870         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
871                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
872
873         /* if checksum mode */
874         if (cur_fwd_eng == &csum_fwd_engine) {
875                 printf("  RX-bad-ipcsum: %-14u  RX-bad-l4csum: "
876                        "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
877         }
878
879 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
880         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
881         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
882 #endif
883 }
884
885 static void
886 flush_fwd_rx_queues(void)
887 {
888         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
889         portid_t  rxp;
890         portid_t port_id;
891         queueid_t rxq;
892         uint16_t  nb_rx;
893         uint16_t  i;
894         uint8_t   j;
895         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
896         uint64_t timer_period;
897
898         /* convert to number of cycles */
899         timer_period = rte_get_timer_hz(); /* 1 second timeout */
900
901         for (j = 0; j < 2; j++) {
902                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
903                         for (rxq = 0; rxq < nb_rxq; rxq++) {
904                                 port_id = fwd_ports_ids[rxp];
905                                 /*
906                                  * testpmd can get stuck in the do-while loop
907                                  * below if rte_eth_rx_burst() always returns a
908                                  * nonzero number of packets, so a 1-second
909                                  * timer is used to break out of the loop.
910                                  */
911                                 prev_tsc = rte_rdtsc();
912                                 do {
913                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
914                                                 pkts_burst, MAX_PKT_BURST);
915                                         for (i = 0; i < nb_rx; i++)
916                                                 rte_pktmbuf_free(pkts_burst[i]);
917
918                                         cur_tsc = rte_rdtsc();
919                                         diff_tsc = cur_tsc - prev_tsc;
920                                         timer_tsc += diff_tsc;
921                                 } while ((nb_rx > 0) &&
922                                         (timer_tsc < timer_period));
923                                 timer_tsc = 0;
924                         }
925                 }
926                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
927         }
928 }
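/*
 * Note: the queues are drained in two passes with a 10 ms pause after each,
 * so packets still in flight when the first pass finishes are also
 * discarded (best effort, bounded by the 1-second per-queue timeout above).
 */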
929
930 static void
931 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
932 {
933         struct fwd_stream **fsm;
934         streamid_t nb_fs;
935         streamid_t sm_id;
936 #ifdef RTE_LIBRTE_BITRATE
937         uint64_t tics_per_1sec;
938         uint64_t tics_datum;
939         uint64_t tics_current;
940         uint8_t idx_port, cnt_ports;
941
942         cnt_ports = rte_eth_dev_count();
943         tics_datum = rte_rdtsc();
944         tics_per_1sec = rte_get_timer_hz();
945 #endif
946         fsm = &fwd_streams[fc->stream_idx];
947         nb_fs = fc->stream_nb;
948         do {
949                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
950                         (*pkt_fwd)(fsm[sm_id]);
951 #ifdef RTE_LIBRTE_BITRATE
952                 tics_current = rte_rdtsc();
953                 if (tics_current - tics_datum >= tics_per_1sec) {
954                         /* Periodic bitrate calculation */
955                         for (idx_port = 0; idx_port < cnt_ports; idx_port++)
956                                 rte_stats_bitrate_calc(bitrate_data, idx_port);
957                         tics_datum = tics_current;
958                 }
959 #endif
960 #ifdef RTE_LIBRTE_LATENCY_STATS
961                 if (latencystats_lcore_id == rte_lcore_id())
962                         rte_latencystats_update();
963 #endif
964
965         } while (! fc->stopped);
966 }
967
968 static int
969 start_pkt_forward_on_core(void *fwd_arg)
970 {
971         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
972                              cur_fwd_config.fwd_eng->packet_fwd);
973         return 0;
974 }
975
976 /*
977  * Run the TXONLY packet forwarding engine to send a single burst of packets.
978  * Used to start communication flows in network loopback test configurations.
979  */
980 static int
981 run_one_txonly_burst_on_core(void *fwd_arg)
982 {
983         struct fwd_lcore *fwd_lc;
984         struct fwd_lcore tmp_lcore;
985
986         fwd_lc = (struct fwd_lcore *) fwd_arg;
987         tmp_lcore = *fwd_lc;
988         tmp_lcore.stopped = 1;
989         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
990         return 0;
991 }
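/*
 * Note: copying the lcore context and pre-setting stopped = 1 makes the
 * do-while loop in run_pkt_fwd_on_lcore() execute exactly one pass, i.e.
 * a single TX-only burst per stream handled by this core.
 */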
992
993 /*
994  * Launch packet forwarding:
995  *     - Set up the per-port forwarding context.
996  *     - Launch logical cores with their forwarding configuration.
997  */
998 static void
999 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1000 {
1001         port_fwd_begin_t port_fwd_begin;
1002         unsigned int i;
1003         unsigned int lc_id;
1004         int diag;
1005
1006         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1007         if (port_fwd_begin != NULL) {
1008                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1009                         (*port_fwd_begin)(fwd_ports_ids[i]);
1010         }
1011         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1012                 lc_id = fwd_lcores_cpuids[i];
1013                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1014                         fwd_lcores[i]->stopped = 0;
1015                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1016                                                      fwd_lcores[i], lc_id);
1017                         if (diag != 0)
1018                                 printf("launch lcore %u failed - diag=%d\n",
1019                                        lc_id, diag);
1020                 }
1021         }
1022 }
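/*
 * Note: in interactive mode the lcore currently running the command line
 * (rte_lcore_id()) is skipped here and keeps servicing the prompt; every
 * other configured forwarding lcore is started with rte_eal_remote_launch().
 */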
1023
1024 /*
1025  * Launch packet forwarding configuration.
1026  */
1027 void
1028 start_packet_forwarding(int with_tx_first)
1029 {
1030         port_fwd_begin_t port_fwd_begin;
1031         port_fwd_end_t  port_fwd_end;
1032         struct rte_port *port;
1033         unsigned int i;
1034         portid_t   pt_id;
1035         streamid_t sm_id;
1036
1037         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1038                 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1039
1040         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1041                 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1042
1043         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1044                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1045                 (!nb_rxq || !nb_txq))
1046                 rte_exit(EXIT_FAILURE,
1047                         "Either rxq or txq is 0, cannot use %s fwd mode\n",
1048                         cur_fwd_eng->fwd_mode_name);
1049
1050         if (all_ports_started() == 0) {
1051                 printf("Not all ports were started\n");
1052                 return;
1053         }
1054         if (test_done == 0) {
1055                 printf("Packet forwarding already started\n");
1056                 return;
1057         }
1058
1059         if (init_fwd_streams() < 0) {
1060                 printf("Fail from init_fwd_streams()\n");
1061                 return;
1062         }
1063
1064         if (dcb_test) {
1065                 for (i = 0; i < nb_fwd_ports; i++) {
1066                         pt_id = fwd_ports_ids[i];
1067                         port = &ports[pt_id];
1068                         if (!port->dcb_flag) {
1069                                 printf("In DCB mode, all forwarding ports must "
1070                                        "be configured in this mode.\n");
1071                                 return;
1072                         }
1073                 }
1074                 if (nb_fwd_lcores == 1) {
1075                         printf("In DCB mode, the number of forwarding cores "
1076                                "should be larger than 1.\n");
1077                         return;
1078                 }
1079         }
1080         test_done = 0;
1081
1082         if (!no_flush_rx)
1083                 flush_fwd_rx_queues();
1084
1085         fwd_config_setup();
1086         pkt_fwd_config_display(&cur_fwd_config);
1087         rxtx_config_display();
1088
1089         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1090                 pt_id = fwd_ports_ids[i];
1091                 port = &ports[pt_id];
1092                 rte_eth_stats_get(pt_id, &port->stats);
1093                 port->tx_dropped = 0;
1094
1095                 map_port_queue_stats_mapping_registers(pt_id, port);
1096         }
1097         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1098                 fwd_streams[sm_id]->rx_packets = 0;
1099                 fwd_streams[sm_id]->tx_packets = 0;
1100                 fwd_streams[sm_id]->fwd_dropped = 0;
1101                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1102                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1103
1104 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1105                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1106                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1107                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1108                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1109 #endif
1110 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1111                 fwd_streams[sm_id]->core_cycles = 0;
1112 #endif
1113         }
1114         if (with_tx_first) {
1115                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1116                 if (port_fwd_begin != NULL) {
1117                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1118                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1119                 }
1120                 while (with_tx_first--) {
1121                         launch_packet_forwarding(
1122                                         run_one_txonly_burst_on_core);
1123                         rte_eal_mp_wait_lcore();
1124                 }
1125                 port_fwd_end = tx_only_engine.port_fwd_end;
1126                 if (port_fwd_end != NULL) {
1127                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1128                                 (*port_fwd_end)(fwd_ports_ids[i]);
1129                 }
1130         }
1131         launch_packet_forwarding(start_pkt_forward_on_core);
1132 }
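/*
 * Illustrative mapping from the testpmd CLI (see cmdline.c): "start" calls
 * start_packet_forwarding(0), while "start tx_first [n]" passes n (1 by
 * default) so that n TX-only bursts prime a loopback setup before the
 * configured forwarding engine takes over.
 */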
1133
1134 void
1135 stop_packet_forwarding(void)
1136 {
1137         struct rte_eth_stats stats;
1138         struct rte_port *port;
1139         port_fwd_end_t  port_fwd_end;
1140         int i;
1141         portid_t   pt_id;
1142         streamid_t sm_id;
1143         lcoreid_t  lc_id;
1144         uint64_t total_recv;
1145         uint64_t total_xmit;
1146         uint64_t total_rx_dropped;
1147         uint64_t total_tx_dropped;
1148         uint64_t total_rx_nombuf;
1149         uint64_t tx_dropped;
1150         uint64_t rx_bad_ip_csum;
1151         uint64_t rx_bad_l4_csum;
1152 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1153         uint64_t fwd_cycles;
1154 #endif
1155         static const char *acc_stats_border = "+++++++++++++++";
1156
1157         if (test_done) {
1158                 printf("Packet forwarding not started\n");
1159                 return;
1160         }
1161         printf("Telling cores to stop...");
1162         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1163                 fwd_lcores[lc_id]->stopped = 1;
1164         printf("\nWaiting for lcores to finish...\n");
1165         rte_eal_mp_wait_lcore();
1166         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1167         if (port_fwd_end != NULL) {
1168                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1169                         pt_id = fwd_ports_ids[i];
1170                         (*port_fwd_end)(pt_id);
1171                 }
1172         }
1173 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1174         fwd_cycles = 0;
1175 #endif
1176         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1177                 if (cur_fwd_config.nb_fwd_streams >
1178                     cur_fwd_config.nb_fwd_ports) {
1179                         fwd_stream_stats_display(sm_id);
1180                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1181                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1182                 } else {
1183                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1184                                 fwd_streams[sm_id];
1185                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1186                                 fwd_streams[sm_id];
1187                 }
1188                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1189                 tx_dropped = (uint64_t) (tx_dropped +
1190                                          fwd_streams[sm_id]->fwd_dropped);
1191                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1192
1193                 rx_bad_ip_csum =
1194                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1195                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1196                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1197                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1198                                                         rx_bad_ip_csum;
1199
1200                 rx_bad_l4_csum =
1201                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1202                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1203                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1204                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1205                                                         rx_bad_l4_csum;
1206
1207 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1208                 fwd_cycles = (uint64_t) (fwd_cycles +
1209                                          fwd_streams[sm_id]->core_cycles);
1210 #endif
1211         }
1212         total_recv = 0;
1213         total_xmit = 0;
1214         total_rx_dropped = 0;
1215         total_tx_dropped = 0;
1216         total_rx_nombuf  = 0;
1217         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1218                 pt_id = fwd_ports_ids[i];
1219
1220                 port = &ports[pt_id];
1221                 rte_eth_stats_get(pt_id, &stats);
1222                 stats.ipackets -= port->stats.ipackets;
1223                 port->stats.ipackets = 0;
1224                 stats.opackets -= port->stats.opackets;
1225                 port->stats.opackets = 0;
1226                 stats.ibytes   -= port->stats.ibytes;
1227                 port->stats.ibytes = 0;
1228                 stats.obytes   -= port->stats.obytes;
1229                 port->stats.obytes = 0;
1230                 stats.imissed  -= port->stats.imissed;
1231                 port->stats.imissed = 0;
1232                 stats.oerrors  -= port->stats.oerrors;
1233                 port->stats.oerrors = 0;
1234                 stats.rx_nombuf -= port->stats.rx_nombuf;
1235                 port->stats.rx_nombuf = 0;
1236
1237                 total_recv += stats.ipackets;
1238                 total_xmit += stats.opackets;
1239                 total_rx_dropped += stats.imissed;
1240                 total_tx_dropped += port->tx_dropped;
1241                 total_rx_nombuf  += stats.rx_nombuf;
1242
1243                 fwd_port_stats_display(pt_id, &stats);
1244         }
1245         printf("\n  %s Accumulated forward statistics for all ports"
1246                "%s\n",
1247                acc_stats_border, acc_stats_border);
1248         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1249                "%-"PRIu64"\n"
1250                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1251                "%-"PRIu64"\n",
1252                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1253                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1254         if (total_rx_nombuf > 0)
1255                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1256         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1257                "%s\n",
1258                acc_stats_border, acc_stats_border);
1259 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1260         if (total_recv > 0)
1261                 printf("\n  CPU cycles/packet=%u (total cycles="
1262                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1263                        (unsigned int)(fwd_cycles / total_recv),
1264                        fwd_cycles, total_recv);
1265 #endif
1266         printf("\nDone.\n");
1267         test_done = 1;
1268 }
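/*
 * Note: the per-port counters printed above are deltas against the
 * rte_eth_stats_get() snapshot taken in start_packet_forwarding(), so each
 * start/stop cycle reports only the traffic of that run.
 */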
1269
1270 void
1271 dev_set_link_up(portid_t pid)
1272 {
1273         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1274                 printf("\nSet link up fail.\n");
1275 }
1276
1277 void
1278 dev_set_link_down(portid_t pid)
1279 {
1280         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1281                 printf("\nSet link down fail.\n");
1282 }
1283
1284 static int
1285 all_ports_started(void)
1286 {
1287         portid_t pi;
1288         struct rte_port *port;
1289
1290         RTE_ETH_FOREACH_DEV(pi) {
1291                 port = &ports[pi];
1292                 /* Check if there is a port which is not started */
1293                 if ((port->port_status != RTE_PORT_STARTED) &&
1294                         (port->slave_flag == 0))
1295                         return 0;
1296         }
1297
1298         /* All (non-slave) ports are started. */
1299         return 1;
1300 }
1301
1302 int
1303 all_ports_stopped(void)
1304 {
1305         portid_t pi;
1306         struct rte_port *port;
1307
1308         RTE_ETH_FOREACH_DEV(pi) {
1309                 port = &ports[pi];
1310                 if ((port->port_status != RTE_PORT_STOPPED) &&
1311                         (port->slave_flag == 0))
1312                         return 0;
1313         }
1314
1315         return 1;
1316 }
1317
1318 int
1319 port_is_started(portid_t port_id)
1320 {
1321         if (port_id_is_invalid(port_id, ENABLED_WARN))
1322                 return 0;
1323
1324         if (ports[port_id].port_status != RTE_PORT_STARTED)
1325                 return 0;
1326
1327         return 1;
1328 }
1329
1330 static int
1331 port_is_closed(portid_t port_id)
1332 {
1333         if (port_id_is_invalid(port_id, ENABLED_WARN))
1334                 return 0;
1335
1336         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1337                 return 0;
1338
1339         return 1;
1340 }
1341
1342 int
1343 start_port(portid_t pid)
1344 {
1345         int diag, need_check_link_status = -1;
1346         portid_t pi;
1347         queueid_t qi;
1348         struct rte_port *port;
1349         struct ether_addr mac_addr;
1350
1351         if (port_id_is_invalid(pid, ENABLED_WARN))
1352                 return 0;
1353
1354         if (dcb_config)
1355                 dcb_test = 1;
1356         RTE_ETH_FOREACH_DEV(pi) {
1357                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1358                         continue;
1359
1360                 need_check_link_status = 0;
1361                 port = &ports[pi];
1362                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1363                                                  RTE_PORT_HANDLING) == 0) {
1364                         printf("Port %d is not stopped, skipping it\n", pi);
1365                         continue;
1366                 }
1367
1368                 if (port->need_reconfig > 0) {
1369                         port->need_reconfig = 0;
1370
1371                         printf("Configuring Port %d (socket %u)\n", pi,
1372                                         port->socket_id);
1373                         /* configure port */
1374                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1375                                                 &(port->dev_conf));
1376                         if (diag != 0) {
1377                                 if (rte_atomic16_cmpset(&(port->port_status),
1378                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1379                                         printf("Port %d can not be set back "
1380                                                         "to stopped\n", pi);
1381                                 printf("Fail to configure port %d\n", pi);
1382                                 /* try to reconfigure port next time */
1383                                 port->need_reconfig = 1;
1384                                 return -1;
1385                         }
1386                 }
1387                 if (port->need_reconfig_queues > 0) {
1388                         port->need_reconfig_queues = 0;
1389                         /* setup tx queues */
1390                         for (qi = 0; qi < nb_txq; qi++) {
1391                                 if ((numa_support) &&
1392                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1393                                         diag = rte_eth_tx_queue_setup(pi, qi,
1394                                                 nb_txd,txring_numa[pi],
1395                                                 &(port->tx_conf));
1396                                 else
1397                                         diag = rte_eth_tx_queue_setup(pi, qi,
1398                                                 nb_txd,port->socket_id,
1399                                                 &(port->tx_conf));
1400
1401                                 if (diag == 0)
1402                                         continue;
1403
1404                                 /* Failed to set up the TX queue; return. */
1405                                 if (rte_atomic16_cmpset(&(port->port_status),
1406                                                         RTE_PORT_HANDLING,
1407                                                         RTE_PORT_STOPPED) == 0)
1408                                         printf("Port %d can not be set back "
1409                                                         "to stopped\n", pi);
1410                                 printf("Fail to configure port %d tx queues\n", pi);
1411                                 /* try to reconfigure queues next time */
1412                                 port->need_reconfig_queues = 1;
1413                                 return -1;
1414                         }
1415                         /* setup rx queues */
1416                         for (qi = 0; qi < nb_rxq; qi++) {
1417                                 if ((numa_support) &&
1418                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1419                                         struct rte_mempool *mp =
1420                                                 mbuf_pool_find(rxring_numa[pi]);
1421                                         if (mp == NULL) {
1422                                                 printf("Failed to set up RX queue: "
1423                                                         "no mempool allocated "
1424                                                         "on socket %d\n",
1425                                                         rxring_numa[pi]);
1426                                                 return -1;
1427                                         }
1428
1429                                         diag = rte_eth_rx_queue_setup(pi, qi,
1430                                              nb_rxd, rxring_numa[pi],
1431                                              &(port->rx_conf), mp);
1432                                 } else {
1433                                         struct rte_mempool *mp =
1434                                                 mbuf_pool_find(port->socket_id);
1435                                         if (mp == NULL) {
1436                                                 printf("Failed to set up RX queue: "
1437                                                         "no mempool allocated "
1438                                                         "on socket %d\n",
1439                                                         port->socket_id);
1440                                                 return -1;
1441                                         }
1442                                         diag = rte_eth_rx_queue_setup(pi, qi,
1443                                              nb_rxd, port->socket_id,
1444                                              &(port->rx_conf), mp);
1445                                 }
1446                                 if (diag == 0)
1447                                         continue;
1448
1449                                 /* Failed to set up RX queue; return */
1450                                 if (rte_atomic16_cmpset(&(port->port_status),
1451                                                         RTE_PORT_HANDLING,
1452                                                         RTE_PORT_STOPPED) == 0)
1453                                         printf("Port %d cannot be set back "
1454                                                         "to stopped\n", pi);
1455                                 printf("Failed to configure port %d RX queues\n", pi);
1456                                 /* try to reconfigure queues next time */
1457                                 port->need_reconfig_queues = 1;
1458                                 return -1;
1459                         }
1460                 }
1461                 /* start port */
1462                 if (rte_eth_dev_start(pi) < 0) {
1463                         printf("Failed to start port %d\n", pi);
1464
1465                         /* Failed to start the port; set it back to stopped */
1466                         if (rte_atomic16_cmpset(&(port->port_status),
1467                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1468                                 printf("Port %d cannot be set back to "
1469                                                         "stopped\n", pi);
1470                         continue;
1471                 }
1472
1473                 if (rte_atomic16_cmpset(&(port->port_status),
1474                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1475                         printf("Port %d cannot be set to started\n", pi);
1476
1477                 rte_eth_macaddr_get(pi, &mac_addr);
1478                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1479                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1480                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1481                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1482
1483                 /* At least one port started; need to check link status */
1484                 need_check_link_status = 1;
1485         }
1486
1487         if (need_check_link_status == 1 && !no_link_check)
1488                 check_all_ports_link_status(RTE_PORT_ALL);
1489         else if (need_check_link_status == 0)
1490                 printf("Please stop the ports first\n");
1491
1492         printf("Done\n");
1493         return 0;
1494 }
1495
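/*
 * Stop the port given by "pid", or all ports when "pid" is RTE_PORT_ALL.
 * Ports still used for forwarding or acting as bonding slaves are skipped;
 * each remaining port is moved from STARTED to HANDLING, stopped with
 * rte_eth_dev_stop() and finally marked STOPPED.
 */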
1496 void
1497 stop_port(portid_t pid)
1498 {
1499         portid_t pi;
1500         struct rte_port *port;
1501         int need_check_link_status = 0;
1502
1503         if (dcb_test) {
1504                 dcb_test = 0;
1505                 dcb_config = 0;
1506         }
1507
1508         if (port_id_is_invalid(pid, ENABLED_WARN))
1509                 return;
1510
1511         printf("Stopping ports...\n");
1512
1513         RTE_ETH_FOREACH_DEV(pi) {
1514                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1515                         continue;
1516
1517                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1518                         printf("Please remove port %d from forwarding configuration.\n", pi);
1519                         continue;
1520                 }
1521
1522                 if (port_is_bonding_slave(pi)) {
1523                         printf("Please remove port %d from bonded device.\n", pi);
1524                         continue;
1525                 }
1526
1527                 port = &ports[pi];
1528                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1529                                                 RTE_PORT_HANDLING) == 0)
1530                         continue;
1531
1532                 rte_eth_dev_stop(pi);
1533
1534                 if (rte_atomic16_cmpset(&(port->port_status),
1535                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1536                         printf("Port %d cannot be set to stopped\n", pi);
1537                 need_check_link_status = 1;
1538         }
1539         if (need_check_link_status && !no_link_check)
1540                 check_all_ports_link_status(RTE_PORT_ALL);
1541
1542         printf("Done\n");
1543 }
1544
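/*
 * Close the port given by "pid", or all ports when "pid" is RTE_PORT_ALL.
 * Only stopped ports are closed; any remaining flow rules are flushed
 * before rte_eth_dev_close() is called and the port is marked CLOSED.
 */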
1545 void
1546 close_port(portid_t pid)
1547 {
1548         portid_t pi;
1549         struct rte_port *port;
1550
1551         if (port_id_is_invalid(pid, ENABLED_WARN))
1552                 return;
1553
1554         printf("Closing ports...\n");
1555
1556         RTE_ETH_FOREACH_DEV(pi) {
1557                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1558                         continue;
1559
1560                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1561                         printf("Please remove port %d from forwarding configuration.\n", pi);
1562                         continue;
1563                 }
1564
1565                 if (port_is_bonding_slave(pi)) {
1566                         printf("Please remove port %d from bonded device.\n", pi);
1567                         continue;
1568                 }
1569
1570                 port = &ports[pi];
1571                 if (rte_atomic16_cmpset(&(port->port_status),
1572                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1573                         printf("Port %d is already closed\n", pi);
1574                         continue;
1575                 }
1576
1577                 if (rte_atomic16_cmpset(&(port->port_status),
1578                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1579                         printf("Port %d is not in stopped state\n", pi);
1580                         continue;
1581                 }
1582
1583                 if (port->flow_list)
1584                         port_flow_flush(pi);
1585                 rte_eth_dev_close(pi);
1586
1587                 if (rte_atomic16_cmpset(&(port->port_status),
1588                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1589                         printf("Port %d cannot be set to closed\n", pi);
1590         }
1591
1592         printf("Done\n");
1593 }
1594
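/*
 * Hot-plug a new port described by the "identifier" device string,
 * reconfigure it on its NUMA socket, enable promiscuous mode and leave
 * it in the STOPPED state.
 */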
1595 void
1596 attach_port(char *identifier)
1597 {
1598         portid_t pi = 0;
1599         unsigned int socket_id;
1600
1601         printf("Attaching a new port...\n");
1602
1603         if (identifier == NULL) {
1604                 printf("Invalid parameters are specified\n");
1605                 return;
1606         }
1607
1608         if (rte_eth_dev_attach(identifier, &pi))
1609                 return;
1610
1611         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1612         /* if socket_id is invalid, set to 0 */
1613         if (check_socket_id(socket_id) < 0)
1614                 socket_id = 0;
1615         reconfig(pi, socket_id);
1616         rte_eth_promiscuous_enable(pi);
1617
1618         nb_ports = rte_eth_dev_count();
1619
1620         ports[pi].port_status = RTE_PORT_STOPPED;
1621
1622         printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1623         printf("Done\n");
1624 }
1625
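/*
 * Hot-unplug the given port. The port must be closed first; any remaining
 * flow rules are flushed before rte_eth_dev_detach() removes the device.
 */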
1626 void
1627 detach_port(uint8_t port_id)
1628 {
1629         char name[RTE_ETH_NAME_MAX_LEN];
1630
1631         printf("Detaching a port...\n");
1632
1633         if (!port_is_closed(port_id)) {
1634                 printf("Please close port first\n");
1635                 return;
1636         }
1637
1638         if (ports[port_id].flow_list)
1639                 port_flow_flush(port_id);
1640
1641         if (rte_eth_dev_detach(port_id, name))
1642                 return;
1643
1644         nb_ports = rte_eth_dev_count();
1645
1646         printf("Port '%s' is detached. Total number of ports is now %d\n",
1647                         name, nb_ports);
1648         printf("Done\n");
1649         return;
1650 }
1651
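/*
 * Clean-up routine called on exit: stop any running forwarding session,
 * then stop and close every probed port with link-status checks disabled.
 */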
1652 void
1653 pmd_test_exit(void)
1654 {
1655         portid_t pt_id;
1656
1657         if (test_done == 0)
1658                 stop_packet_forwarding();
1659
1660         if (ports != NULL) {
1661                 no_link_check = 1;
1662                 RTE_ETH_FOREACH_DEV(pt_id) {
1663                         printf("\nShutting down port %d...\n", pt_id);
1664                         fflush(stdout);
1665                         stop_port(pt_id);
1666                         close_port(pt_id);
1667                 }
1668         }
1669         printf("\nBye...\n");
1670 }
1671
1672 typedef void (*cmd_func_t)(void);
1673 struct pmd_test_command {
1674         const char *cmd_name;
1675         cmd_func_t cmd_func;
1676 };
1677
1678 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1679
1680 /* Check the link status of all ports for up to 9s, then print the final status */
1681 static void
1682 check_all_ports_link_status(uint32_t port_mask)
1683 {
1684 #define CHECK_INTERVAL 100 /* 100ms */
1685 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1686         uint8_t portid, count, all_ports_up, print_flag = 0;
1687         struct rte_eth_link link;
1688
1689         printf("Checking link statuses...\n");
1690         fflush(stdout);
1691         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1692                 all_ports_up = 1;
1693                 RTE_ETH_FOREACH_DEV(portid) {
1694                         if ((port_mask & (1 << portid)) == 0)
1695                                 continue;
1696                         memset(&link, 0, sizeof(link));
1697                         rte_eth_link_get_nowait(portid, &link);
1698                         /* print link status if flag set */
1699                         if (print_flag == 1) {
1700                                 if (link.link_status)
1701                                         printf("Port %d Link Up - speed %u "
1702                                                 "Mbps - %s\n", (uint8_t)portid,
1703                                                 (unsigned)link.link_speed,
1704                                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1705                                                 "full-duplex" : "half-duplex");
1706                                 else
1707                                         printf("Port %d Link Down\n",
1708                                                 (uint8_t)portid);
1709                                 continue;
1710                         }
1711                         /* clear all_ports_up flag if any link down */
1712                         if (link.link_status == ETH_LINK_DOWN) {
1713                                 all_ports_up = 0;
1714                                 break;
1715                         }
1716                 }
1717                 /* After the final print of all link statuses, get out */
1718                 if (print_flag == 1)
1719                         break;
1720
1721                 if (all_ports_up == 0) {
1722                         fflush(stdout);
1723                         rte_delay_ms(CHECK_INTERVAL);
1724                 }
1725
1726                 /* set the print_flag if all ports up or timeout */
1727                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1728                         print_flag = 1;
1729                 }
1730         }
1731 }
1732
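/*
 * Apply the configured TX queue to statistics-counter mappings to the
 * given port. Returns 0 on success, or the error returned by
 * rte_eth_dev_set_tx_queue_stats_mapping() on failure.
 */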
1733 static int
1734 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1735 {
1736         uint16_t i;
1737         int diag;
1738         uint8_t mapping_found = 0;
1739
1740         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1741                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1742                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1743                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1744                                         tx_queue_stats_mappings[i].queue_id,
1745                                         tx_queue_stats_mappings[i].stats_counter_id);
1746                         if (diag != 0)
1747                                 return diag;
1748                         mapping_found = 1;
1749                 }
1750         }
1751         if (mapping_found)
1752                 port->tx_queue_stats_mapping_enabled = 1;
1753         return 0;
1754 }
1755
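/*
 * Same as above, for the RX queue to statistics-counter mappings.
 */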
1756 static int
1757 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1758 {
1759         uint16_t i;
1760         int diag;
1761         uint8_t mapping_found = 0;
1762
1763         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1764                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1765                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1766                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1767                                         rx_queue_stats_mappings[i].queue_id,
1768                                         rx_queue_stats_mappings[i].stats_counter_id);
1769                         if (diag != 0)
1770                                 return diag;
1771                         mapping_found = 1;
1772                 }
1773         }
1774         if (mapping_found)
1775                 port->rx_queue_stats_mapping_enabled = 1;
1776         return 0;
1777 }
1778
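/*
 * Program both TX and RX queue statistics mappings for a port. -ENOTSUP
 * from the driver only disables the feature for that direction; any other
 * error is fatal.
 */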
1779 static void
1780 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1781 {
1782         int diag = 0;
1783
1784         diag = set_tx_queue_stats_mapping_registers(pi, port);
1785         if (diag != 0) {
1786                 if (diag == -ENOTSUP) {
1787                         port->tx_queue_stats_mapping_enabled = 0;
1788                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1789                 }
1790                 else
1791                         rte_exit(EXIT_FAILURE,
1792                                         "set_tx_queue_stats_mapping_registers "
1793                                         "failed for port id=%d diag=%d\n",
1794                                         pi, diag);
1795         }
1796
1797         diag = set_rx_queue_stats_mapping_registers(pi, port);
1798         if (diag != 0) {
1799                 if (diag == -ENOTSUP) {
1800                         port->rx_queue_stats_mapping_enabled = 0;
1801                         printf("RX queue stats mapping not supported port id=%d\n", pi);
1802                 }
1803                 else
1804                         rte_exit(EXIT_FAILURE,
1805                                         "set_rx_queue_stats_mapping_registers "
1806                                         "failed for port id=%d diag=%d\n",
1807                                         pi, diag);
1808         }
1809 }
1810
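/*
 * Initialize the per-port RX/TX queue configuration from the driver
 * defaults, then override every threshold or flag that was explicitly
 * set (i.e. is not RTE_PMD_PARAM_UNSET).
 */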
1811 static void
1812 rxtx_port_config(struct rte_port *port)
1813 {
1814         port->rx_conf = port->dev_info.default_rxconf;
1815         port->tx_conf = port->dev_info.default_txconf;
1816
1817         /* Check if any RX/TX parameters have been passed */
1818         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1819                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1820
1821         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1822                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1823
1824         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1825                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1826
1827         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1828                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1829
1830         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1831                 port->rx_conf.rx_drop_en = rx_drop_en;
1832
1833         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1834                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1835
1836         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1837                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1838
1839         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1840                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1841
1842         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1843                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1844
1845         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1846                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1847
1848         if (txq_flags != RTE_PMD_PARAM_UNSET)
1849                 port->tx_conf.txq_flags = txq_flags;
1850 }
1851
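/*
 * Apply the global default configuration (RX mode, flow director, RSS,
 * queue thresholds) to every probed port, read back its MAC address and
 * program its queue statistics mappings.
 */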
1852 void
1853 init_port_config(void)
1854 {
1855         portid_t pid;
1856         struct rte_port *port;
1857
1858         RTE_ETH_FOREACH_DEV(pid) {
1859                 port = &ports[pid];
1860                 port->dev_conf.rxmode = rx_mode;
1861                 port->dev_conf.fdir_conf = fdir_conf;
1862                 if (nb_rxq > 1) {
1863                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1864                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1865                 } else {
1866                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1867                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1868                 }
1869
1870                 if (port->dcb_flag == 0) {
1871                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1872                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1873                         else
1874                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1875                 }
1876
1877                 rxtx_port_config(port);
1878
1879                 rte_eth_macaddr_get(pid, &port->eth_addr);
1880
1881                 map_port_queue_stats_mapping_registers(pid, port);
1882 #ifdef RTE_NIC_BYPASS
1883                 rte_eth_dev_bypass_init(pid);
1884 #endif
1885         }
1886 }
1887
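/* Bonding-slave bookkeeping: ports flagged here are skipped when stopping or closing. */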
1888 void set_port_slave_flag(portid_t slave_pid)
1889 {
1890         struct rte_port *port;
1891
1892         port = &ports[slave_pid];
1893         port->slave_flag = 1;
1894 }
1895
1896 void clear_port_slave_flag(portid_t slave_pid)
1897 {
1898         struct rte_port *port;
1899
1900         port = &ports[slave_pid];
1901         port->slave_flag = 0;
1902 }
1903
1904 uint8_t port_is_bonding_slave(portid_t slave_pid)
1905 {
1906         struct rte_port *port;
1907
1908         port = &ports[slave_pid];
1909         return port->slave_flag;
1910 }
1911
1912 const uint16_t vlan_tags[] = {
1913                 0,  1,  2,  3,  4,  5,  6,  7,
1914                 8,  9, 10, 11, 12, 13, 14, 15,
1915                 16, 17, 18, 19, 20, 21, 22, 23,
1916                 24, 25, 26, 27, 28, 29, 30, 31
1917 };
1918
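/*
 * Fill "eth_conf" for DCB operation: either VMDq+DCB (DCB_VT_ENABLED),
 * mapping the vlan_tags[] array onto the queue pools, or plain DCB with
 * RSS, spreading the user priorities over "num_tcs" traffic classes.
 * PFC support is advertised in dcb_capability_en when "pfc_en" is set.
 */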
1919 static int
1920 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1921                  enum dcb_mode_enable dcb_mode,
1922                  enum rte_eth_nb_tcs num_tcs,
1923                  uint8_t pfc_en)
1924 {
1925         uint8_t i;
1926
1927         /*
1928          * Build the correct configuration for DCB+VT based on the vlan_tags
1929          * array given above and the number of traffic classes available for use.
1930          */
1931         if (dcb_mode == DCB_VT_ENABLED) {
1932                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1933                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
1934                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1935                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1936
1937                 /* VMDQ+DCB RX and TX configurations */
1938                 vmdq_rx_conf->enable_default_pool = 0;
1939                 vmdq_rx_conf->default_pool = 0;
1940                 vmdq_rx_conf->nb_queue_pools =
1941                         (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1942                 vmdq_tx_conf->nb_queue_pools =
1943                         (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1944
1945                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1946                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1947                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1948                         vmdq_rx_conf->pool_map[i].pools =
1949                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
1950                 }
1951                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1952                         vmdq_rx_conf->dcb_tc[i] = i;
1953                         vmdq_tx_conf->dcb_tc[i] = i;
1954                 }
1955
1956                 /* set DCB mode of RX and TX of multiple queues */
1957                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1958                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1959         } else {
1960                 struct rte_eth_dcb_rx_conf *rx_conf =
1961                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
1962                 struct rte_eth_dcb_tx_conf *tx_conf =
1963                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
1964
1965                 rx_conf->nb_tcs = num_tcs;
1966                 tx_conf->nb_tcs = num_tcs;
1967
1968                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1969                         rx_conf->dcb_tc[i] = i % num_tcs;
1970                         tx_conf->dcb_tc[i] = i % num_tcs;
1971                 }
1972                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1973                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1974                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1975         }
1976
1977         if (pfc_en)
1978                 eth_conf->dcb_capability_en =
1979                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1980         else
1981                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1982
1983         return 0;
1984 }
1985
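/*
 * Switch a port to DCB mode: build the DCB configuration, write it to the
 * device with zero queues, derive nb_rxq/nb_txq from the device
 * capabilities, and enable VLAN filtering for every entry in vlan_tags[].
 */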
1986 int
1987 init_port_dcb_config(portid_t pid,
1988                      enum dcb_mode_enable dcb_mode,
1989                      enum rte_eth_nb_tcs num_tcs,
1990                      uint8_t pfc_en)
1991 {
1992         struct rte_eth_conf port_conf;
1993         struct rte_port *rte_port;
1994         int retval;
1995         uint16_t i;
1996
1997         rte_port = &ports[pid];
1998
1999         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2000         /* Enter DCB configuration status */
2001         dcb_config = 1;
2002
2003         /* Set configuration of DCB in VT mode and DCB in non-VT mode */
2004         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2005         if (retval < 0)
2006                 return retval;
2007         port_conf.rxmode.hw_vlan_filter = 1;
2008
2009         /*
2010          * Write the configuration into the device.
2011          * Set the numbers of RX & TX queues to 0, so
2012          * the RX & TX queues will not be set up here.
2013          */
2014         (void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2015
2016         rte_eth_dev_info_get(pid, &rte_port->dev_info);
2017
2018         /* If dev_info.vmdq_pool_base is greater than 0,
2019          * the queue IDs of the VMDq pools start after the PF queues.
2020          */
2021         if (dcb_mode == DCB_VT_ENABLED &&
2022             rte_port->dev_info.vmdq_pool_base > 0) {
2023                 printf("VMDQ_DCB multi-queue mode is nonsensical"
2024                         " for port %d.\n", pid);
2025                 return -1;
2026         }
2027
2028         /* Assume the ports in testpmd have the same DCB capability
2029          * and the same number of RX and TX queues in DCB mode.
2030          */
2031         if (dcb_mode == DCB_VT_ENABLED) {
2032                 if (rte_port->dev_info.max_vfs > 0) {
2033                         nb_rxq = rte_port->dev_info.nb_rx_queues;
2034                         nb_txq = rte_port->dev_info.nb_tx_queues;
2035                 } else {
2036                         nb_rxq = rte_port->dev_info.max_rx_queues;
2037                         nb_txq = rte_port->dev_info.max_tx_queues;
2038                 }
2039         } else {
2040                 /* If VT is disabled, use all PF queues */
2041                 if (rte_port->dev_info.vmdq_pool_base == 0) {
2042                         nb_rxq = rte_port->dev_info.max_rx_queues;
2043                         nb_txq = rte_port->dev_info.max_tx_queues;
2044                 } else {
2045                         nb_rxq = (queueid_t)num_tcs;
2046                         nb_txq = (queueid_t)num_tcs;
2047
2048                 }
2049         }
2050         rx_free_thresh = 64;
2051
2052         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2053
2054         rxtx_port_config(rte_port);
2055         /* VLAN filter */
2056         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2057         for (i = 0; i < RTE_DIM(vlan_tags); i++)
2058                 rx_vft_set(pid, vlan_tags[i], 1);
2059
2060         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2061         map_port_queue_stats_mapping_registers(pid, rte_port);
2062
2063         rte_port->dcb_flag = 1;
2064
2065         return 0;
2066 }
2067
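/* Allocate the zeroed global array of RTE_MAX_ETHPORTS port configuration structures. */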
2068 static void
2069 init_port(void)
2070 {
2071         /* Configuration of Ethernet ports. */
2072         ports = rte_zmalloc("testpmd: ports",
2073                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2074                             RTE_CACHE_LINE_SIZE);
2075         if (ports == NULL) {
2076                 rte_exit(EXIT_FAILURE,
2077                                 "rte_zmalloc(%d struct rte_port) failed\n",
2078                                 RTE_MAX_ETHPORTS);
2079         }
2080 }
2081
2082 static void
2083 force_quit(void)
2084 {
2085         pmd_test_exit();
2086         prompt_exit();
2087 }
2088
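/*
 * SIGINT/SIGTERM handler: tear down the packet capture and latency-stats
 * frameworks (when enabled), shut down all ports, then re-raise the signal
 * with the default handler so the process exits with the expected status.
 */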
2089 static void
2090 signal_handler(int signum)
2091 {
2092         if (signum == SIGINT || signum == SIGTERM) {
2093                 printf("\nSignal %d received, preparing to exit...\n",
2094                                 signum);
2095 #ifdef RTE_LIBRTE_PDUMP
2096                 /* uninitialize packet capture framework */
2097                 rte_pdump_uninit();
2098 #endif
2099 #ifdef RTE_LIBRTE_LATENCY_STATS
2100                 rte_latencystats_uninit();
2101 #endif
2102                 force_quit();
2103                 /* exit with the expected status */
2104                 signal(signum, SIG_DFL);
2105                 kill(getpid(), signum);
2106         }
2107 }
2108
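/*
 * Entry point: initialize the EAL, allocate the port structures, parse the
 * testpmd command-line arguments, start all ports and then either enter the
 * interactive command prompt or forward packets until the user presses enter.
 */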
2109 int
2110 main(int argc, char** argv)
2111 {
2112         int  diag;
2113         uint8_t port_id;
2114
2115         signal(SIGINT, signal_handler);
2116         signal(SIGTERM, signal_handler);
2117
2118         diag = rte_eal_init(argc, argv);
2119         if (diag < 0)
2120                 rte_panic("Cannot init EAL\n");
2121
2122 #ifdef RTE_LIBRTE_PDUMP
2123         /* initialize packet capture framework */
2124         rte_pdump_init(NULL);
2125 #endif
2126
2127         nb_ports = (portid_t) rte_eth_dev_count();
2128         if (nb_ports == 0)
2129                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2130
2131         /* allocate port structures, and init them */
2132         init_port();
2133
2134         set_def_fwd_config();
2135         if (nb_lcores == 0)
2136                 rte_panic("Empty set of forwarding logical cores - check the "
2137                           "core mask supplied in the command parameters\n");
2138
2139         argc -= diag;
2140         argv += diag;
2141         if (argc > 1)
2142                 launch_args_parse(argc, argv);
2143
2144         if (!nb_rxq && !nb_txq)
2145                 printf("Warning: Either rx or tx queues should be non-zero\n");
2146
2147         if (nb_rxq > 1 && nb_rxq > nb_txq)
2148                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2149                        "but nb_txq=%d will prevent it from being fully tested.\n",
2150                        nb_rxq, nb_txq);
2151
2152         init_config();
2153         if (start_port(RTE_PORT_ALL) != 0)
2154                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2155
2156         /* set all ports to promiscuous mode by default */
2157         RTE_ETH_FOREACH_DEV(port_id)
2158                 rte_eth_promiscuous_enable(port_id);
2159
2160         /* Init metrics library */
2161         rte_metrics_init(rte_socket_id());
2162
2163 #ifdef RTE_LIBRTE_LATENCY_STATS
2164         if (latencystats_enabled != 0) {
2165                 int ret = rte_latencystats_init(1, NULL);
2166                 if (ret)
2167                         printf("Warning: latencystats init()"
2168                                 " returned error %d\n", ret);
2169                 printf("Latencystats running on lcore %d\n",
2170                         latencystats_lcore_id);
2171         }
2172 #endif
2173
2174         /* Setup bitrate stats */
2175 #ifdef RTE_LIBRTE_BITRATE
2176         bitrate_data = rte_stats_bitrate_create();
2177         if (bitrate_data == NULL)
2178                 rte_exit(EXIT_FAILURE, "Could not allocate bitrate data.\n");
2179         rte_stats_bitrate_reg(bitrate_data);
2180 #endif
2181
2182
2183 #ifdef RTE_LIBRTE_CMDLINE
2184         if (interactive == 1) {
2185                 if (auto_start) {
2186                         printf("Start automatic packet forwarding\n");
2187                         start_packet_forwarding(0);
2188                 }
2189                 prompt();
2190         } else
2191 #endif
2192         {
2193                 char c;
2194                 int rc;
2195
2196                 printf("No command-line core given, starting packet forwarding\n");
2197                 start_packet_forwarding(0);
2198                 printf("Press enter to exit\n");
2199                 rc = read(0, &c, 1);
2200                 pmd_test_exit();
2201                 if (rc < 0)
2202                         return 1;
2203         }
2204
2205         return 0;
2206 }