ethdev: fix device state on detach
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_IXGBE_PMD
77 #include <rte_pmd_ixgbe.h>
78 #endif
79 #ifdef RTE_LIBRTE_PMD_XENVIRT
80 #include <rte_eth_xenvirt.h>
81 #endif
82 #ifdef RTE_LIBRTE_PDUMP
83 #include <rte_pdump.h>
84 #endif
85 #include <rte_flow.h>
86 #include <rte_metrics.h>
87 #ifdef RTE_LIBRTE_BITRATE
88 #include <rte_bitrate.h>
89 #endif
90 #ifdef RTE_LIBRTE_LATENCY_STATS
91 #include <rte_latencystats.h>
92 #endif
93 #include <rte_gro.h>
94
95 #include "testpmd.h"
96
97 uint16_t verbose_level = 0; /**< Silent by default. */
98
99 /* use master core for command line ? */
100 uint8_t interactive = 0;
101 uint8_t auto_start = 0;
102 uint8_t tx_first;
103 char cmdline_filename[PATH_MAX] = {0};
104
105 /*
106  * NUMA support configuration.
107  * When set, NUMA support dispatches the allocation of the RX and TX
108  * memory rings and of the DMA memory buffers (mbufs) of the probed ports
109  * among CPU sockets 0 and 1.
110  * Otherwise, all memory is allocated from CPU socket 0.
111  */
112 uint8_t numa_support = 1; /**< numa enabled by default */
113
114 /*
115  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
116  * not configured.
117  */
118 uint8_t socket_num = UMA_NO_CONFIG;
119
120 /*
121  * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
122  */
123 uint8_t mp_anon = 0;
124
125 /*
126  * Record the Ethernet address of peer target ports to which packets are
127  * forwarded.
128  * Must be instantiated with the ethernet addresses of peer traffic generator
129  * ports.
130  */
131 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
132 portid_t nb_peer_eth_addrs = 0;
133
134 /*
135  * Probed Target Environment.
136  */
137 struct rte_port *ports;        /**< For all probed ethernet ports. */
138 portid_t nb_ports;             /**< Number of probed ethernet ports. */
139 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
140 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
141
142 /*
143  * Test Forwarding Configuration.
144  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
145  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
146  */
147 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
148 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
149 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
150 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
151
152 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
153 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
154
155 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
156 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
157
158 /*
159  * Forwarding engines.
160  */
161 struct fwd_engine * fwd_engines[] = {
162         &io_fwd_engine,
163         &mac_fwd_engine,
164         &mac_swap_engine,
165         &flow_gen_engine,
166         &rx_only_engine,
167         &tx_only_engine,
168         &csum_fwd_engine,
169         &icmp_echo_engine,
170 #ifdef RTE_LIBRTE_IEEE1588
171         &ieee1588_fwd_engine,
172 #endif
173         NULL,
174 };
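/*
 * Note: the list above is NULL-terminated; other parts of testpmd walk it,
 * e.g. when a forwarding mode is selected by name, so a new engine only
 * needs to be added to this array.
 */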
175
176 struct fwd_config cur_fwd_config;
177 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
178 uint32_t retry_enabled;
179 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
180 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
181
182 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
183 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
184                                       * specified on command-line. */
185 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
186 /*
187  * Configuration of packet segments used by the "txonly" processing engine.
188  */
189 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
190 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
191         TXONLY_DEF_PACKET_LEN,
192 };
193 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
194
195 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
196 /**< Split policy for packets to TX. */
197
198 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
199 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
200
201 /* whether the current configuration is in DCB mode; 0 means it is not */
202 uint8_t dcb_config = 0;
203
204 /* Whether the dcb is in testing status */
205 uint8_t dcb_test = 0;
206
207 /*
208  * Configurable number of RX/TX queues.
209  */
210 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
211 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
212
213 /*
214  * Configurable number of RX/TX ring descriptors.
215  */
216 #define RTE_TEST_RX_DESC_DEFAULT 128
217 #define RTE_TEST_TX_DESC_DEFAULT 512
218 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
219 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
220
221 #define RTE_PMD_PARAM_UNSET -1
222 /*
223  * Configurable values of RX and TX ring threshold registers.
224  */
225
226 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
228 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
229
230 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
232 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
233
234 /*
235  * Configurable value of RX free threshold.
236  */
237 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
238
239 /*
240  * Configurable value of RX drop enable.
241  */
242 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
243
244 /*
245  * Configurable value of TX free threshold.
246  */
247 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
248
249 /*
250  * Configurable value of TX RS bit threshold.
251  */
252 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
253
254 /*
255  * Configurable value of TX queue flags.
256  */
257 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
258
259 /*
260  * Receive Side Scaling (RSS) configuration.
261  */
262 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
263
264 /*
265  * Port topology configuration
266  */
267 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
268
269 /*
270  * Avoid flushing all the RX streams before starting forwarding.
271  */
272 uint8_t no_flush_rx = 0; /* flush by default */
273
274 /*
275  * Flow API isolated mode.
276  */
277 uint8_t flow_isolate_all;
278
279 /*
280  * Avoid checking the link status when starting/stopping a port.
281  */
282 uint8_t no_link_check = 0; /* check by default */
283
284 /*
285  * Enable link status change notification
286  */
287 uint8_t lsc_interrupt = 1; /* enabled by default */
288
289 /*
290  * Enable device removal notification.
291  */
292 uint8_t rmv_interrupt = 1; /* enabled by default */
293
294 /*
295  * Display or mask ether events
296  * Default to all events except VF_MBOX
297  */
298 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
299                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
300                             (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
301                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
302                             (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
303                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
304
305 /*
306  * NIC bypass mode configuration options.
307  */
308
309 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
310 /* The NIC bypass watchdog timeout. */
311 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
312 #endif
313
314
315 #ifdef RTE_LIBRTE_LATENCY_STATS
316
317 /*
318  * Set when latency stats is enabled in the commandline
319  */
320 uint8_t latencystats_enabled;
321
322 /*
323  * Lcore ID that serves latency statistics.
324  */
325 lcoreid_t latencystats_lcore_id = -1;
326
327 #endif
328
329 /*
330  * Ethernet device configuration.
331  */
332 struct rte_eth_rxmode rx_mode = {
333         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
334         .split_hdr_size = 0,
335         .header_split   = 0, /**< Header Split disabled. */
336         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
337         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
338         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
339         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
340         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
341         .hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
342 };
343
344 struct rte_fdir_conf fdir_conf = {
345         .mode = RTE_FDIR_MODE_NONE,
346         .pballoc = RTE_FDIR_PBALLOC_64K,
347         .status = RTE_FDIR_REPORT_STATUS,
348         .mask = {
349                 .vlan_tci_mask = 0x0,
350                 .ipv4_mask     = {
351                         .src_ip = 0xFFFFFFFF,
352                         .dst_ip = 0xFFFFFFFF,
353                 },
354                 .ipv6_mask     = {
355                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
356                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
357                 },
358                 .src_port_mask = 0xFFFF,
359                 .dst_port_mask = 0xFFFF,
360                 .mac_addr_byte_mask = 0xFF,
361                 .tunnel_type_mask = 1,
362                 .tunnel_id_mask = 0xFFFFFFFF,
363         },
364         .drop_queue = 127,
365 };
366
367 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
368
369 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
370 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
371
372 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
373 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
374
375 uint16_t nb_tx_queue_stats_mappings = 0;
376 uint16_t nb_rx_queue_stats_mappings = 0;
377
378 unsigned int num_sockets = 0;
379 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
380
381 #ifdef RTE_LIBRTE_BITRATE
382 /* Bitrate statistics */
383 struct rte_stats_bitrates *bitrate_data;
384 lcoreid_t bitrate_lcore_id;
385 uint8_t bitrate_enabled;
386 #endif
387
388 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
389
390 /* Forward function declarations */
391 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
392 static void check_all_ports_link_status(uint32_t port_mask);
393 static int eth_event_callback(uint8_t port_id,
394                               enum rte_eth_event_type type,
395                               void *param, void *ret_param);
396
397 /*
398  * Check if all the ports are started.
399  * If yes, return positive value. If not, return zero.
400  */
401 static int all_ports_started(void);
402
403 /*
404  * Helper function to check if socket is already discovered.
405  * If yes, return positive value. If not, return zero.
406  */
407 int
408 new_socket_id(unsigned int socket_id)
409 {
410         unsigned int i;
411
412         for (i = 0; i < num_sockets; i++) {
413                 if (socket_ids[i] == socket_id)
414                         return 0;
415         }
416         return 1;
417 }
418
419 /*
420  * Setup default configuration.
421  */
422 static void
423 set_default_fwd_lcores_config(void)
424 {
425         unsigned int i;
426         unsigned int nb_lc;
427         unsigned int sock_num;
428
429         nb_lc = 0;
430         for (i = 0; i < RTE_MAX_LCORE; i++) {
431                 sock_num = rte_lcore_to_socket_id(i);
432                 if (new_socket_id(sock_num)) {
433                         if (num_sockets >= RTE_MAX_NUMA_NODES) {
434                                 rte_exit(EXIT_FAILURE,
435                                          "Total sockets greater than %u\n",
436                                          RTE_MAX_NUMA_NODES);
437                         }
438                         socket_ids[num_sockets++] = sock_num;
439                 }
440                 if (!rte_lcore_is_enabled(i))
441                         continue;
442                 if (i == rte_get_master_lcore())
443                         continue;
444                 fwd_lcores_cpuids[nb_lc++] = i;
445         }
446         nb_lcores = (lcoreid_t) nb_lc;
447         nb_cfg_lcores = nb_lcores;
448         nb_fwd_lcores = 1;
449 }
450
451 static void
452 set_def_peer_eth_addrs(void)
453 {
454         portid_t i;
455
456         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
457                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
458                 peer_eth_addrs[i].addr_bytes[5] = i;
459         }
460 }
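/*
 * ETHER_LOCAL_ADMIN_ADDR is 0x02, so the default peer addresses set above
 * are locally administered MACs of the form 02:00:00:00:00:<port index>,
 * which should not clash with the address of any real NIC.
 */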
461
462 static void
463 set_default_fwd_ports_config(void)
464 {
465         portid_t pt_id;
466
467         for (pt_id = 0; pt_id < nb_ports; pt_id++)
468                 fwd_ports_ids[pt_id] = pt_id;
469
470         nb_cfg_ports = nb_ports;
471         nb_fwd_ports = nb_ports;
472 }
473
474 void
475 set_def_fwd_config(void)
476 {
477         set_default_fwd_lcores_config();
478         set_def_peer_eth_addrs();
479         set_default_fwd_ports_config();
480 }
481
482 /*
483  * Configuration initialisation done once at init time.
484  */
485 static void
486 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
487                  unsigned int socket_id)
488 {
489         char pool_name[RTE_MEMPOOL_NAMESIZE];
490         struct rte_mempool *rte_mp = NULL;
491         uint32_t mb_size;
492
493         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
494         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
495
496         RTE_LOG(INFO, USER1,
497                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
498                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
499
500 #ifdef RTE_LIBRTE_PMD_XENVIRT
501         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
502                 (unsigned) mb_mempool_cache,
503                 sizeof(struct rte_pktmbuf_pool_private),
504                 rte_pktmbuf_pool_init, NULL,
505                 rte_pktmbuf_init, NULL,
506                 socket_id, 0);
507 #endif
508
509         /* if the former Xen allocation failed, fall back to normal allocation */
510         if (rte_mp == NULL) {
511                 if (mp_anon != 0) {
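                        /*
                         * Anonymous-memory path (--mp-anon): create an empty
                         * pool, back it with anonymous mappings and then run
                         * the usual pktmbuf pool/object initializers by hand,
                         * since rte_pktmbuf_pool_create() is bypassed here.
                         */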
512                         rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
513                                 mb_size, (unsigned) mb_mempool_cache,
514                                 sizeof(struct rte_pktmbuf_pool_private),
515                                 socket_id, 0);
516                         if (rte_mp == NULL)
517                                 goto err;
518
519                         if (rte_mempool_populate_anon(rte_mp) == 0) {
520                                 rte_mempool_free(rte_mp);
521                                 rte_mp = NULL;
522                                 goto err;
523                         }
524                         rte_pktmbuf_pool_init(rte_mp, NULL);
525                         rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
526                 } else {
527                         /* wrapper to rte_mempool_create() */
528                         rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
529                                 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
530                 }
531         }
532
533 err:
534         if (rte_mp == NULL) {
535                 rte_exit(EXIT_FAILURE,
536                         "Creation of mbuf pool for socket %u failed: %s\n",
537                         socket_id, rte_strerror(rte_errno));
538         } else if (verbose_level > 0) {
539                 rte_mempool_dump(stdout, rte_mp);
540         }
541 }
542
543 /*
544  * Check whether a given socket id is valid in NUMA mode;
545  * return 0 if valid, -1 otherwise.
546  */
547 static int
548 check_socket_id(const unsigned int socket_id)
549 {
550         static int warning_once = 0;
551
552         if (new_socket_id(socket_id)) {
553                 if (!warning_once && numa_support)
554                         printf("Warning: NUMA should be configured manually by"
555                                " using --port-numa-config and"
556                                " --ring-numa-config parameters along with"
557                                " --numa.\n");
558                 warning_once = 1;
559                 return -1;
560         }
561         return 0;
562 }
563
564 static void
565 init_config(void)
566 {
567         portid_t pid;
568         struct rte_port *port;
569         struct rte_mempool *mbp;
570         unsigned int nb_mbuf_per_pool;
571         lcoreid_t  lc_id;
572         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
573
574         memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
575
576         if (numa_support) {
577                 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
578                 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
579                 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
580         }
581
582         /* Configuration of logical cores. */
583         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
584                                 sizeof(struct fwd_lcore *) * nb_lcores,
585                                 RTE_CACHE_LINE_SIZE);
586         if (fwd_lcores == NULL) {
587                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
588                                                         "failed\n", nb_lcores);
589         }
590         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
591                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
592                                                sizeof(struct fwd_lcore),
593                                                RTE_CACHE_LINE_SIZE);
594                 if (fwd_lcores[lc_id] == NULL) {
595                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
596                                                                 "failed\n");
597                 }
598                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
599         }
600
601         RTE_ETH_FOREACH_DEV(pid) {
602                 port = &ports[pid];
603                 rte_eth_dev_info_get(pid, &port->dev_info);
604
605                 if (numa_support) {
606                         if (port_numa[pid] != NUMA_NO_CONFIG)
607                                 port_per_socket[port_numa[pid]]++;
608                         else {
609                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
610
611                                 /* if socket_id is invalid, set to 0 */
612                                 if (check_socket_id(socket_id) < 0)
613                                         socket_id = 0;
614                                 port_per_socket[socket_id]++;
615                         }
616                 }
617
618                 /* set flag to initialize port/queue */
619                 port->need_reconfig = 1;
620                 port->need_reconfig_queues = 1;
621         }
622
623         /*
624          * Create pools of mbuf.
625          * If NUMA support is disabled, create a single pool of mbuf in
626          * socket 0 memory by default.
627          * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
628          *
629          * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
630          * nb_txd can be configured at run time.
631          */
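        /*
         * The default computed below is a rough worst case (a sketch of the
         * intent rather than an exact bound): per port, up to
         * RTE_TEST_RX_DESC_MAX mbufs may sit in RX rings and
         * RTE_TEST_TX_DESC_MAX in TX rings, each lcore may hold up to
         * mb_mempool_cache mbufs in its per-core cache, and one extra burst
         * can be in flight; the sum is then scaled by RTE_MAX_ETHPORTS.
         */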
632         if (param_total_num_mbufs)
633                 nb_mbuf_per_pool = param_total_num_mbufs;
634         else {
635                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
636                         (nb_lcores * mb_mempool_cache) +
637                         RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
638                 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
639         }
640
641         if (numa_support) {
642                 uint8_t i;
643
644                 for (i = 0; i < num_sockets; i++)
645                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
646                                          socket_ids[i]);
647         } else {
648                 if (socket_num == UMA_NO_CONFIG)
649                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
650                 else
651                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
652                                                  socket_num);
653         }
654
655         init_port_config();
656
657         /*
658          * Records which Mbuf pool to use by each logical core, if needed.
659          */
660         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
661                 mbp = mbuf_pool_find(
662                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
663
664                 if (mbp == NULL)
665                         mbp = mbuf_pool_find(0);
666                 fwd_lcores[lc_id]->mbp = mbp;
667         }
668
669         /* Configuration of packet forwarding streams. */
670         if (init_fwd_streams() < 0)
671                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
672
673         fwd_config_setup();
674 }
675
676
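/*
 * Note: reconfig() is the hot-plug counterpart of init_config(); it is
 * expected to be called when a port is attached at run time, so that the
 * new port is (re)configured the next time it is started.
 */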
677 void
678 reconfig(portid_t new_port_id, unsigned socket_id)
679 {
680         struct rte_port *port;
681
682         /* Reconfiguration of Ethernet ports. */
683         port = &ports[new_port_id];
684         rte_eth_dev_info_get(new_port_id, &port->dev_info);
685
686         /* set flag to initialize port/queue */
687         port->need_reconfig = 1;
688         port->need_reconfig_queues = 1;
689         port->socket_id = socket_id;
690
691         init_port_config();
692 }
693
694
695 int
696 init_fwd_streams(void)
697 {
698         portid_t pid;
699         struct rte_port *port;
700         streamid_t sm_id, nb_fwd_streams_new;
701         queueid_t q;
702
703         /* set the socket id according to the NUMA configuration */
704         RTE_ETH_FOREACH_DEV(pid) {
705                 port = &ports[pid];
706                 if (nb_rxq > port->dev_info.max_rx_queues) {
707                         printf("Fail: nb_rxq(%d) is greater than "
708                                 "max_rx_queues(%d)\n", nb_rxq,
709                                 port->dev_info.max_rx_queues);
710                         return -1;
711                 }
712                 if (nb_txq > port->dev_info.max_tx_queues) {
713                         printf("Fail: nb_txq(%d) is greater than "
714                                 "max_tx_queues(%d)\n", nb_txq,
715                                 port->dev_info.max_tx_queues);
716                         return -1;
717                 }
718                 if (numa_support) {
719                         if (port_numa[pid] != NUMA_NO_CONFIG)
720                                 port->socket_id = port_numa[pid];
721                         else {
722                                 port->socket_id = rte_eth_dev_socket_id(pid);
723
724                                 /* if socket_id is invalid, set to 0 */
725                                 if (check_socket_id(port->socket_id) < 0)
726                                         port->socket_id = 0;
727                         }
728                 }
729                 else {
730                         if (socket_num == UMA_NO_CONFIG)
731                                 port->socket_id = 0;
732                         else
733                                 port->socket_id = socket_num;
734                 }
735         }
736
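        /*
         * One forwarding stream is allocated per port and per queue, using
         * the larger of the RX and TX queue counts so that both directions
         * are covered.
         */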
737         q = RTE_MAX(nb_rxq, nb_txq);
738         if (q == 0) {
739                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
740                 return -1;
741         }
742         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
743         if (nb_fwd_streams_new == nb_fwd_streams)
744                 return 0;
745         /* clear the old */
746         if (fwd_streams != NULL) {
747                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
748                         if (fwd_streams[sm_id] == NULL)
749                                 continue;
750                         rte_free(fwd_streams[sm_id]);
751                         fwd_streams[sm_id] = NULL;
752                 }
753                 rte_free(fwd_streams);
754                 fwd_streams = NULL;
755         }
756
757         /* init new */
758         nb_fwd_streams = nb_fwd_streams_new;
759         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
760                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
761         if (fwd_streams == NULL)
762                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
763                                                 "failed\n", nb_fwd_streams);
764
765         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
766                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
767                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
768                 if (fwd_streams[sm_id] == NULL)
769                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
770                                                                 " failed\n");
771         }
772
773         return 0;
774 }
775
776 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
777 static void
778 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
779 {
780         unsigned int total_burst;
781         unsigned int nb_burst;
782         unsigned int burst_stats[3];
783         uint16_t pktnb_stats[3];
784         uint16_t nb_pkt;
785         int burst_percent[3];
786
787         /*
788          * First compute the total number of packet bursts and the
789          * two highest numbers of bursts of the same number of packets.
790          */
791         total_burst = 0;
792         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
793         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
794         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
795                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
796                 if (nb_burst == 0)
797                         continue;
798                 total_burst += nb_burst;
799                 if (nb_burst > burst_stats[0]) {
800                         burst_stats[1] = burst_stats[0];
801                         pktnb_stats[1] = pktnb_stats[0];
802                         burst_stats[0] = nb_burst;
803                         pktnb_stats[0] = nb_pkt;
804                 }
805         }
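        /*
         * Note: only a burst count that becomes the new maximum pushes the
         * previous maximum into the runner-up slot, so burst_stats[1] is an
         * approximation of the second-highest spread rather than an exact
         * value.
         */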
806         if (total_burst == 0)
807                 return;
808         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
809         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
810                burst_percent[0], (int) pktnb_stats[0]);
811         if (burst_stats[0] == total_burst) {
812                 printf("]\n");
813                 return;
814         }
815         if (burst_stats[0] + burst_stats[1] == total_burst) {
816                 printf(" + %d%% of %d pkts]\n",
817                        100 - burst_percent[0], pktnb_stats[1]);
818                 return;
819         }
820         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
821         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
822         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
823                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
824                 return;
825         }
826         printf(" + %d%% of %d pkts + %d%% of others]\n",
827                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
828 }
829 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
830
831 static void
832 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
833 {
834         struct rte_port *port;
835         uint8_t i;
836
837         static const char *fwd_stats_border = "----------------------";
838
839         port = &ports[port_id];
840         printf("\n  %s Forward statistics for port %-2d %s\n",
841                fwd_stats_border, port_id, fwd_stats_border);
842
843         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
844                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
845                        "%-"PRIu64"\n",
846                        stats->ipackets, stats->imissed,
847                        (uint64_t) (stats->ipackets + stats->imissed));
848
849                 if (cur_fwd_eng == &csum_fwd_engine)
850                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
851                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
852                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
853                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
854                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
855                 }
856
857                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
858                        "%-"PRIu64"\n",
859                        stats->opackets, port->tx_dropped,
860                        (uint64_t) (stats->opackets + port->tx_dropped));
861         }
862         else {
863                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
864                        "%14"PRIu64"\n",
865                        stats->ipackets, stats->imissed,
866                        (uint64_t) (stats->ipackets + stats->imissed));
867
868                 if (cur_fwd_eng == &csum_fwd_engine)
869                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
870                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
871                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
872                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
873                         printf("  RX-nombufs:             %14"PRIu64"\n",
874                                stats->rx_nombuf);
875                 }
876
877                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
878                        "%14"PRIu64"\n",
879                        stats->opackets, port->tx_dropped,
880                        (uint64_t) (stats->opackets + port->tx_dropped));
881         }
882
883 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
884         if (port->rx_stream)
885                 pkt_burst_stats_display("RX",
886                         &port->rx_stream->rx_burst_stats);
887         if (port->tx_stream)
888                 pkt_burst_stats_display("TX",
889                         &port->tx_stream->tx_burst_stats);
890 #endif
891
892         if (port->rx_queue_stats_mapping_enabled) {
893                 printf("\n");
894                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
895                         printf("  Stats reg %2d RX-packets:%14"PRIu64
896                                "     RX-errors:%14"PRIu64
897                                "    RX-bytes:%14"PRIu64"\n",
898                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
899                 }
900                 printf("\n");
901         }
902         if (port->tx_queue_stats_mapping_enabled) {
903                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
904                         printf("  Stats reg %2d TX-packets:%14"PRIu64
905                                "                                 TX-bytes:%14"PRIu64"\n",
906                                i, stats->q_opackets[i], stats->q_obytes[i]);
907                 }
908         }
909
910         printf("  %s--------------------------------%s\n",
911                fwd_stats_border, fwd_stats_border);
912 }
913
914 static void
915 fwd_stream_stats_display(streamid_t stream_id)
916 {
917         struct fwd_stream *fs;
918         static const char *fwd_top_stats_border = "-------";
919
920         fs = fwd_streams[stream_id];
921         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
922             (fs->fwd_dropped == 0))
923                 return;
924         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
925                "TX Port=%2d/Queue=%2d %s\n",
926                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
927                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
928         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
929                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
930
931         /* if checksum mode */
932         if (cur_fwd_eng == &csum_fwd_engine) {
933                 printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
934                        "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
935         }
936
937 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
938         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
939         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
940 #endif
941 }
942
943 static void
944 flush_fwd_rx_queues(void)
945 {
946         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
947         portid_t  rxp;
948         portid_t port_id;
949         queueid_t rxq;
950         uint16_t  nb_rx;
951         uint16_t  i;
952         uint8_t   j;
953         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
954         uint64_t timer_period;
955
956         /* convert to number of cycles */
957         timer_period = rte_get_timer_hz(); /* 1 second timeout */
958
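        /*
         * Two full passes with a short pause in between: the second pass is
         * presumably meant to also drain packets that were still in flight
         * while the first pass was running.
         */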
959         for (j = 0; j < 2; j++) {
960                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
961                         for (rxq = 0; rxq < nb_rxq; rxq++) {
962                                 port_id = fwd_ports_ids[rxp];
963                                 /*
964                                  * testpmd can get stuck in the do/while loop below
965                                  * if rte_eth_rx_burst() always returns a nonzero
966                                  * number of packets, so a timer is used to exit
967                                  * the loop after a one-second timeout.
968                                  */
969                                 prev_tsc = rte_rdtsc();
970                                 do {
971                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
972                                                 pkts_burst, MAX_PKT_BURST);
973                                         for (i = 0; i < nb_rx; i++)
974                                                 rte_pktmbuf_free(pkts_burst[i]);
975
976                                         cur_tsc = rte_rdtsc();
977                                         diff_tsc = cur_tsc - prev_tsc;
978                                         timer_tsc += diff_tsc;
979                                 } while ((nb_rx > 0) &&
980                                         (timer_tsc < timer_period));
981                                 timer_tsc = 0;
982                         }
983                 }
984                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
985         }
986 }
987
988 static void
989 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
990 {
991         struct fwd_stream **fsm;
992         streamid_t nb_fs;
993         streamid_t sm_id;
994 #ifdef RTE_LIBRTE_BITRATE
995         uint64_t tics_per_1sec;
996         uint64_t tics_datum;
997         uint64_t tics_current;
998         uint8_t idx_port, cnt_ports;
999
1000         cnt_ports = rte_eth_dev_count();
1001         tics_datum = rte_rdtsc();
1002         tics_per_1sec = rte_get_timer_hz();
1003 #endif
1004         fsm = &fwd_streams[fc->stream_idx];
1005         nb_fs = fc->stream_nb;
1006         do {
1007                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1008                         (*pkt_fwd)(fsm[sm_id]);
1009 #ifdef RTE_LIBRTE_BITRATE
1010                 if (bitrate_enabled != 0 &&
1011                                 bitrate_lcore_id == rte_lcore_id()) {
1012                         tics_current = rte_rdtsc();
1013                         if (tics_current - tics_datum >= tics_per_1sec) {
1014                                 /* Periodic bitrate calculation */
1015                                 for (idx_port = 0;
1016                                                 idx_port < cnt_ports;
1017                                                 idx_port++)
1018                                         rte_stats_bitrate_calc(bitrate_data,
1019                                                 idx_port);
1020                                 tics_datum = tics_current;
1021                         }
1022                 }
1023 #endif
1024 #ifdef RTE_LIBRTE_LATENCY_STATS
1025                 if (latencystats_enabled != 0 &&
1026                                 latencystats_lcore_id == rte_lcore_id())
1027                         rte_latencystats_update();
1028 #endif
1029
1030         } while (! fc->stopped);
1031 }
1032
1033 static int
1034 start_pkt_forward_on_core(void *fwd_arg)
1035 {
1036         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1037                              cur_fwd_config.fwd_eng->packet_fwd);
1038         return 0;
1039 }
1040
1041 /*
1042  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1043  * Used to start communication flows in network loopback test configurations.
1044  */
1045 static int
1046 run_one_txonly_burst_on_core(void *fwd_arg)
1047 {
1048         struct fwd_lcore *fwd_lc;
1049         struct fwd_lcore tmp_lcore;
1050
1051         fwd_lc = (struct fwd_lcore *) fwd_arg;
1052         tmp_lcore = *fwd_lc;
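        /*
         * Marking the local copy as stopped makes the do/while loop in
         * run_pkt_fwd_on_lcore() exit after its first iteration, so exactly
         * one burst is sent per stream.
         */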
1053         tmp_lcore.stopped = 1;
1054         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1055         return 0;
1056 }
1057
1058 /*
1059  * Launch packet forwarding:
1060  *     - Setup per-port forwarding context.
1061  *     - launch logical cores with their forwarding configuration.
1062  */
1063 static void
1064 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1065 {
1066         port_fwd_begin_t port_fwd_begin;
1067         unsigned int i;
1068         unsigned int lc_id;
1069         int diag;
1070
1071         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1072         if (port_fwd_begin != NULL) {
1073                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1074                         (*port_fwd_begin)(fwd_ports_ids[i]);
1075         }
1076         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1077                 lc_id = fwd_lcores_cpuids[i];
1078                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1079                         fwd_lcores[i]->stopped = 0;
1080                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1081                                                      fwd_lcores[i], lc_id);
1082                         if (diag != 0)
1083                                 printf("launch lcore %u failed - diag=%d\n",
1084                                        lc_id, diag);
1085                 }
1086         }
1087 }
1088
1089 /*
1090  * Launch packet forwarding configuration.
1091  */
1092 void
1093 start_packet_forwarding(int with_tx_first)
1094 {
1095         port_fwd_begin_t port_fwd_begin;
1096         port_fwd_end_t  port_fwd_end;
1097         struct rte_port *port;
1098         unsigned int i;
1099         portid_t   pt_id;
1100         streamid_t sm_id;
1101
1102         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1103                 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1104
1105         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1106                 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1107
1108         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1109                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1110                 (!nb_rxq || !nb_txq))
1111                 rte_exit(EXIT_FAILURE,
1112                         "Either rxq or txq are 0, cannot use %s fwd mode\n",
1113                         cur_fwd_eng->fwd_mode_name);
1114
1115         if (all_ports_started() == 0) {
1116                 printf("Not all ports were started\n");
1117                 return;
1118         }
1119         if (test_done == 0) {
1120                 printf("Packet forwarding already started\n");
1121                 return;
1122         }
1123
1124         if (init_fwd_streams() < 0) {
1125                 printf("Fail from init_fwd_streams()\n");
1126                 return;
1127         }
1128
1129         if(dcb_test) {
1130                 for (i = 0; i < nb_fwd_ports; i++) {
1131                         pt_id = fwd_ports_ids[i];
1132                         port = &ports[pt_id];
1133                         if (!port->dcb_flag) {
1134                                 printf("In DCB mode, all forwarding ports must "
1135                                        "be configured in this mode.\n");
1136                                 return;
1137                         }
1138                 }
1139                 if (nb_fwd_lcores == 1) {
1140                         printf("In DCB mode, the number of forwarding cores "
1141                                "should be larger than 1.\n");
1142                         return;
1143                 }
1144         }
1145         test_done = 0;
1146
1147         if(!no_flush_rx)
1148                 flush_fwd_rx_queues();
1149
1150         fwd_config_setup();
1151         pkt_fwd_config_display(&cur_fwd_config);
1152         rxtx_config_display();
1153
1154         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1155                 pt_id = fwd_ports_ids[i];
1156                 port = &ports[pt_id];
1157                 rte_eth_stats_get(pt_id, &port->stats);
1158                 port->tx_dropped = 0;
1159
1160                 map_port_queue_stats_mapping_registers(pt_id, port);
1161         }
1162         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1163                 fwd_streams[sm_id]->rx_packets = 0;
1164                 fwd_streams[sm_id]->tx_packets = 0;
1165                 fwd_streams[sm_id]->fwd_dropped = 0;
1166                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1167                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1168
1169 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1170                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1171                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1172                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1173                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1174 #endif
1175 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1176                 fwd_streams[sm_id]->core_cycles = 0;
1177 #endif
1178         }
1179         if (with_tx_first) {
1180                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1181                 if (port_fwd_begin != NULL) {
1182                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1183                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1184                 }
1185                 while (with_tx_first--) {
1186                         launch_packet_forwarding(
1187                                         run_one_txonly_burst_on_core);
1188                         rte_eal_mp_wait_lcore();
1189                 }
1190                 port_fwd_end = tx_only_engine.port_fwd_end;
1191                 if (port_fwd_end != NULL) {
1192                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1193                                 (*port_fwd_end)(fwd_ports_ids[i]);
1194                 }
1195         }
1196         launch_packet_forwarding(start_pkt_forward_on_core);
1197 }
1198
1199 void
1200 stop_packet_forwarding(void)
1201 {
1202         struct rte_eth_stats stats;
1203         struct rte_port *port;
1204         port_fwd_end_t  port_fwd_end;
1205         int i;
1206         portid_t   pt_id;
1207         streamid_t sm_id;
1208         lcoreid_t  lc_id;
1209         uint64_t total_recv;
1210         uint64_t total_xmit;
1211         uint64_t total_rx_dropped;
1212         uint64_t total_tx_dropped;
1213         uint64_t total_rx_nombuf;
1214         uint64_t tx_dropped;
1215         uint64_t rx_bad_ip_csum;
1216         uint64_t rx_bad_l4_csum;
1217 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1218         uint64_t fwd_cycles;
1219 #endif
1220         static const char *acc_stats_border = "+++++++++++++++";
1221
1222         if (test_done) {
1223                 printf("Packet forwarding not started\n");
1224                 return;
1225         }
1226         printf("Telling cores to stop...");
1227         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1228                 fwd_lcores[lc_id]->stopped = 1;
1229         printf("\nWaiting for lcores to finish...\n");
1230         rte_eal_mp_wait_lcore();
1231         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1232         if (port_fwd_end != NULL) {
1233                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1234                         pt_id = fwd_ports_ids[i];
1235                         (*port_fwd_end)(pt_id);
1236                 }
1237         }
1238 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1239         fwd_cycles = 0;
1240 #endif
1241         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1242                 if (cur_fwd_config.nb_fwd_streams >
1243                     cur_fwd_config.nb_fwd_ports) {
1244                         fwd_stream_stats_display(sm_id);
1245                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1246                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1247                 } else {
1248                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1249                                 fwd_streams[sm_id];
1250                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1251                                 fwd_streams[sm_id];
1252                 }
1253                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1254                 tx_dropped = (uint64_t) (tx_dropped +
1255                                          fwd_streams[sm_id]->fwd_dropped);
1256                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1257
1258                 rx_bad_ip_csum =
1259                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1260                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1261                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1262                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1263                                                         rx_bad_ip_csum;
1264
1265                 rx_bad_l4_csum =
1266                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1267                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1268                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1269                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1270                                                         rx_bad_l4_csum;
1271
1272 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1273                 fwd_cycles = (uint64_t) (fwd_cycles +
1274                                          fwd_streams[sm_id]->core_cycles);
1275 #endif
1276         }
1277         total_recv = 0;
1278         total_xmit = 0;
1279         total_rx_dropped = 0;
1280         total_tx_dropped = 0;
1281         total_rx_nombuf  = 0;
1282         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1283                 pt_id = fwd_ports_ids[i];
1284
1285                 port = &ports[pt_id];
1286                 rte_eth_stats_get(pt_id, &stats);
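                /*
                 * Subtract the snapshot taken in start_packet_forwarding()
                 * so that only the traffic of this forwarding run is
                 * reported below.
                 */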
1287                 stats.ipackets -= port->stats.ipackets;
1288                 port->stats.ipackets = 0;
1289                 stats.opackets -= port->stats.opackets;
1290                 port->stats.opackets = 0;
1291                 stats.ibytes   -= port->stats.ibytes;
1292                 port->stats.ibytes = 0;
1293                 stats.obytes   -= port->stats.obytes;
1294                 port->stats.obytes = 0;
1295                 stats.imissed  -= port->stats.imissed;
1296                 port->stats.imissed = 0;
1297                 stats.oerrors  -= port->stats.oerrors;
1298                 port->stats.oerrors = 0;
1299                 stats.rx_nombuf -= port->stats.rx_nombuf;
1300                 port->stats.rx_nombuf = 0;
1301
1302                 total_recv += stats.ipackets;
1303                 total_xmit += stats.opackets;
1304                 total_rx_dropped += stats.imissed;
1305                 total_tx_dropped += port->tx_dropped;
1306                 total_rx_nombuf  += stats.rx_nombuf;
1307
1308                 fwd_port_stats_display(pt_id, &stats);
1309         }
1310         printf("\n  %s Accumulated forward statistics for all ports"
1311                "%s\n",
1312                acc_stats_border, acc_stats_border);
1313         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1314                "%-"PRIu64"\n"
1315                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1316                "%-"PRIu64"\n",
1317                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1318                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1319         if (total_rx_nombuf > 0)
1320                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1321         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1322                "%s\n",
1323                acc_stats_border, acc_stats_border);
1324 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1325         if (total_recv > 0)
1326                 printf("\n  CPU cycles/packet=%u (total cycles="
1327                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1328                        (unsigned int)(fwd_cycles / total_recv),
1329                        fwd_cycles, total_recv);
1330 #endif
1331         printf("\nDone.\n");
1332         test_done = 1;
1333 }
1334
1335 void
1336 dev_set_link_up(portid_t pid)
1337 {
1338         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1339                 printf("\nSet link up failed.\n");
1340 }
1341
1342 void
1343 dev_set_link_down(portid_t pid)
1344 {
1345         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1346                 printf("\nSet link down failed.\n");
1347 }
1348
1349 static int
1350 all_ports_started(void)
1351 {
1352         portid_t pi;
1353         struct rte_port *port;
1354
1355         RTE_ETH_FOREACH_DEV(pi) {
1356                 port = &ports[pi];
1357                 /* Check if there is a port which is not started */
1358                 if ((port->port_status != RTE_PORT_STARTED) &&
1359                         (port->slave_flag == 0))
1360                         return 0;
1361         }
1362
1363         /* All ports are started */
1364         return 1;
1365 }
1366
1367 int
1368 all_ports_stopped(void)
1369 {
1370         portid_t pi;
1371         struct rte_port *port;
1372
1373         RTE_ETH_FOREACH_DEV(pi) {
1374                 port = &ports[pi];
1375                 if ((port->port_status != RTE_PORT_STOPPED) &&
1376                         (port->slave_flag == 0))
1377                         return 0;
1378         }
1379
1380         return 1;
1381 }
1382
1383 int
1384 port_is_started(portid_t port_id)
1385 {
1386         if (port_id_is_invalid(port_id, ENABLED_WARN))
1387                 return 0;
1388
1389         if (ports[port_id].port_status != RTE_PORT_STARTED)
1390                 return 0;
1391
1392         return 1;
1393 }
1394
1395 static int
1396 port_is_closed(portid_t port_id)
1397 {
1398         if (port_id_is_invalid(port_id, ENABLED_WARN))
1399                 return 0;
1400
1401         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1402                 return 0;
1403
1404         return 1;
1405 }
1406
1407 int
1408 start_port(portid_t pid)
1409 {
1410         int diag, need_check_link_status = -1;
1411         portid_t pi;
1412         queueid_t qi;
1413         struct rte_port *port;
1414         struct ether_addr mac_addr;
1415         enum rte_eth_event_type event_type;
1416
1417         if (port_id_is_invalid(pid, ENABLED_WARN))
1418                 return 0;
1419
1420         if(dcb_config)
1421                 dcb_test = 1;
1422         RTE_ETH_FOREACH_DEV(pi) {
1423                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1424                         continue;
1425
1426                 need_check_link_status = 0;
1427                 port = &ports[pi];
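                /*
                 * The compare-and-set below acts as a state machine guard:
                 * only a port currently in RTE_PORT_STOPPED state is moved
                 * to RTE_PORT_HANDLING and (re)configured; any other port is
                 * skipped.
                 */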
1428                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1429                                                  RTE_PORT_HANDLING) == 0) {
1430                         printf("Port %d is not stopped, skipping it\n", pi);
1431                         continue;
1432                 }
1433
1434                 if (port->need_reconfig > 0) {
1435                         port->need_reconfig = 0;
1436
1437                         if (flow_isolate_all) {
1438                                 int ret = port_flow_isolate(pi, 1);
1439                                 if (ret) {
1440                                         printf("Failed to apply isolated"
1441                                                " mode on port %d\n", pi);
1442                                         return -1;
1443                                 }
1444                         }
1445
1446                         printf("Configuring Port %d (socket %u)\n", pi,
1447                                         port->socket_id);
1448                         /* configure port */
1449                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1450                                                 &(port->dev_conf));
1451                         if (diag != 0) {
1452                                 if (rte_atomic16_cmpset(&(port->port_status),
1453                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1454                                         printf("Port %d cannot be set back "
1455                                                         "to stopped\n", pi);
1456                                 printf("Failed to configure port %d\n", pi);
1457                                 /* try to reconfigure port next time */
1458                                 port->need_reconfig = 1;
1459                                 return -1;
1460                         }
1461                 }
1462                 if (port->need_reconfig_queues > 0) {
1463                         port->need_reconfig_queues = 0;
1464                         /* setup tx queues */
1465                         for (qi = 0; qi < nb_txq; qi++) {
1466                                 if ((numa_support) &&
1467                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1468                                         diag = rte_eth_tx_queue_setup(pi, qi,
1469                                                 nb_txd, txring_numa[pi],
1470                                                 &(port->tx_conf));
1471                                 else
1472                                         diag = rte_eth_tx_queue_setup(pi, qi,
1473                                                 nb_txd, port->socket_id,
1474                                                 &(port->tx_conf));
1475
1476                                 if (diag == 0)
1477                                         continue;
1478
1479                                 /* Failed to set up tx queue, return */
1480                                 if (rte_atomic16_cmpset(&(port->port_status),
1481                                                         RTE_PORT_HANDLING,
1482                                                         RTE_PORT_STOPPED) == 0)
1483                                         printf("Port %d cannot be set back "
1484                                                         "to stopped\n", pi);
1485                                 printf("Failed to configure port %d tx queues\n", pi);
1486                                 /* try to reconfigure queues next time */
1487                                 port->need_reconfig_queues = 1;
1488                                 return -1;
1489                         }
1490                         /* setup rx queues */
1491                         for (qi = 0; qi < nb_rxq; qi++) {
1492                                 if ((numa_support) &&
1493                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1494                                         struct rte_mempool * mp =
1495                                                 mbuf_pool_find(rxring_numa[pi]);
1496                                         if (mp == NULL) {
1497                                                 printf("Failed to setup RX queue: "
1498                                                         "no mempool allocated"
1499                                                         " on socket %d\n",
1500                                                         rxring_numa[pi]);
1501                                                 return -1;
1502                                         }
1503
1504                                         diag = rte_eth_rx_queue_setup(pi, qi,
1505                                              nb_rxd, rxring_numa[pi],
1506                                              &(port->rx_conf), mp);
1507                                 } else {
1508                                         struct rte_mempool *mp =
1509                                                 mbuf_pool_find(port->socket_id);
1510                                         if (mp == NULL) {
1511                                                 printf("Failed to setup RX queue: "
1512                                                         "no mempool allocated"
1513                                                         " on socket %d\n",
1514                                                         port->socket_id);
1515                                                 return -1;
1516                                         }
1517                                         diag = rte_eth_rx_queue_setup(pi, qi,
1518                                              nb_rxd, port->socket_id,
1519                                              &(port->rx_conf), mp);
1520                                 }
1521                                 if (diag == 0)
1522                                         continue;
1523
1524                                 /* Failed to set up rx queue, return */
1525                                 if (rte_atomic16_cmpset(&(port->port_status),
1526                                                         RTE_PORT_HANDLING,
1527                                                         RTE_PORT_STOPPED) == 0)
1528                                         printf("Port %d cannot be set back "
1529                                                         "to stopped\n", pi);
1530                                 printf("Failed to configure port %d rx queues\n", pi);
1531                                 /* try to reconfigure queues next time */
1532                                 port->need_reconfig_queues = 1;
1533                                 return -1;
1534                         }
1535                 }
1536
1537                 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1538                      event_type < RTE_ETH_EVENT_MAX;
1539                      event_type++) {
1540                         diag = rte_eth_dev_callback_register(pi,
1541                                                         event_type,
1542                                                         eth_event_callback,
1543                                                         NULL);
1544                         if (diag) {
1545                                 printf("Failed to setup event callback for event %d\n",
1546                                         event_type);
1547                                 return -1;
1548                         }
1549                 }
1550
1551                 /* start port */
1552                 if (rte_eth_dev_start(pi) < 0) {
1553                         printf("Failed to start port %d\n", pi);
1554
1555                         /* Failed to start the port; set it back to stopped */
1556                         if (rte_atomic16_cmpset(&(port->port_status),
1557                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1558                                 printf("Port %d cannot be set back to "
1559                                                         "stopped\n", pi);
1560                         continue;
1561                 }
1562
1563                 if (rte_atomic16_cmpset(&(port->port_status),
1564                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1565                         printf("Port %d cannot be set to started\n", pi);
1566
1567                 rte_eth_macaddr_get(pi, &mac_addr);
1568                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1569                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1570                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1571                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1572
1573                 /* at least one port started, need checking link status */
1574                 need_check_link_status = 1;
1575         }
1576
1577         if (need_check_link_status == 1 && !no_link_check)
1578                 check_all_ports_link_status(RTE_PORT_ALL);
1579         else if (need_check_link_status == 0)
1580                 printf("Please stop the ports first\n");
1581
1582         printf("Done\n");
1583         return 0;
1584 }
1585
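/*
 * Stop the given port, or every port when pid == RTE_PORT_ALL.
 * Ports still used for forwarding or acting as bonding slaves are skipped;
 * the others move STARTED -> HANDLING -> STOPPED around rte_eth_dev_stop().
 */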
1586 void
1587 stop_port(portid_t pid)
1588 {
1589         portid_t pi;
1590         struct rte_port *port;
1591         int need_check_link_status = 0;
1592
1593         if (dcb_test) {
1594                 dcb_test = 0;
1595                 dcb_config = 0;
1596         }
1597
1598         if (port_id_is_invalid(pid, ENABLED_WARN))
1599                 return;
1600
1601         printf("Stopping ports...\n");
1602
1603         RTE_ETH_FOREACH_DEV(pi) {
1604                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1605                         continue;
1606
1607                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1608                         printf("Please remove port %d from forwarding configuration.\n", pi);
1609                         continue;
1610                 }
1611
1612                 if (port_is_bonding_slave(pi)) {
1613                         printf("Please remove port %d from bonded device.\n", pi);
1614                         continue;
1615                 }
1616
1617                 port = &ports[pi];
1618                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1619                                                 RTE_PORT_HANDLING) == 0)
1620                         continue;
1621
1622                 rte_eth_dev_stop(pi);
1623
1624                 if (rte_atomic16_cmpset(&(port->port_status),
1625                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1626                         printf("Port %d cannot be set to stopped\n", pi);
1627                 need_check_link_status = 1;
1628         }
1629         if (need_check_link_status && !no_link_check)
1630                 check_all_ports_link_status(RTE_PORT_ALL);
1631
1632         printf("Done\n");
1633 }
1634
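/*
 * Close the given port, or every port when pid == RTE_PORT_ALL.
 * Only stopped ports are closed: pending flow rules are flushed, then
 * rte_eth_dev_close() is called and the status is set to CLOSED.
 */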
1635 void
1636 close_port(portid_t pid)
1637 {
1638         portid_t pi;
1639         struct rte_port *port;
1640
1641         if (port_id_is_invalid(pid, ENABLED_WARN))
1642                 return;
1643
1644         printf("Closing ports...\n");
1645
1646         RTE_ETH_FOREACH_DEV(pi) {
1647                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1648                         continue;
1649
1650                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1651                         printf("Please remove port %d from forwarding configuration.\n", pi);
1652                         continue;
1653                 }
1654
1655                 if (port_is_bonding_slave(pi)) {
1656                         printf("Please remove port %d from bonded device.\n", pi);
1657                         continue;
1658                 }
1659
1660                 port = &ports[pi];
1661                 if (rte_atomic16_cmpset(&(port->port_status),
1662                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1663                         printf("Port %d is already closed\n", pi);
1664                         continue;
1665                 }
1666
1667                 if (rte_atomic16_cmpset(&(port->port_status),
1668                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1669                         printf("Port %d is not stopped\n", pi);
1670                         continue;
1671                 }
1672
1673                 if (port->flow_list)
1674                         port_flow_flush(pi);
1675                 rte_eth_dev_close(pi);
1676
1677                 if (rte_atomic16_cmpset(&(port->port_status),
1678                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1679                         printf("Port %d cannot be set to closed\n", pi);
1680         }
1681
1682         printf("Done\n");
1683 }
1684
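/*
 * Hotplug a new port identified by its device arguments (PCI address or
 * virtual device name), configure it on its NUMA socket, enable promiscuous
 * mode and leave it in the STOPPED state.
 */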
1685 void
1686 attach_port(char *identifier)
1687 {
1688         portid_t pi = 0;
1689         unsigned int socket_id;
1690
1691         printf("Attaching a new port...\n");
1692
1693         if (identifier == NULL) {
1694                 printf("Invalid parameter specified\n");
1695                 return;
1696         }
1697
1698         if (rte_eth_dev_attach(identifier, &pi))
1699                 return;
1700
1701         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1702         /* if socket_id is invalid, set to 0 */
1703         if (check_socket_id(socket_id) < 0)
1704                 socket_id = 0;
1705         reconfig(pi, socket_id);
1706         rte_eth_promiscuous_enable(pi);
1707
1708         nb_ports = rte_eth_dev_count();
1709
1710         ports[pi].port_status = RTE_PORT_STOPPED;
1711
1712         printf("Port %d is attached. Total ports: %d\n", pi, nb_ports);
1713         printf("Done\n");
1714 }
1715
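/*
 * Hotplug-remove a port. The port must already be closed; any remaining
 * flow rules are flushed before rte_eth_dev_detach() is called.
 */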
1716 void
1717 detach_port(uint8_t port_id)
1718 {
1719         char name[RTE_ETH_NAME_MAX_LEN];
1720
1721         printf("Detaching a port...\n");
1722
1723         if (!port_is_closed(port_id)) {
1724                 printf("Please close port first\n");
1725                 return;
1726         }
1727
1728         if (ports[port_id].flow_list)
1729                 port_flow_flush(port_id);
1730
1731         if (rte_eth_dev_detach(port_id, name))
1732                 return;
1733
1734         nb_ports = rte_eth_dev_count();
1735
1736         printf("Port '%s' is detached. Total ports: %d\n",
1737                         name, nb_ports);
1738         printf("Done\n");
1739         return;
1740 }
1741
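/* Stop packet forwarding if it is still running, then stop and close every port. */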
1742 void
1743 pmd_test_exit(void)
1744 {
1745         portid_t pt_id;
1746
1747         if (test_done == 0)
1748                 stop_packet_forwarding();
1749
1750         if (ports != NULL) {
1751                 no_link_check = 1;
1752                 RTE_ETH_FOREACH_DEV(pt_id) {
1753                         printf("\nShutting down port %d...\n", pt_id);
1754                         fflush(stdout);
1755                         stop_port(pt_id);
1756                         close_port(pt_id);
1757                 }
1758         }
1759         printf("\nBye...\n");
1760 }
1761
1762 typedef void (*cmd_func_t)(void);
1763 struct pmd_test_command {
1764         const char *cmd_name;
1765         cmd_func_t cmd_func;
1766 };
1767
1768 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1769
1770 /* Check the link status of all ports for up to 9 seconds, then print the final status */
1771 static void
1772 check_all_ports_link_status(uint32_t port_mask)
1773 {
1774 #define CHECK_INTERVAL 100 /* 100ms */
1775 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1776         uint8_t portid, count, all_ports_up, print_flag = 0;
1777         struct rte_eth_link link;
1778
1779         printf("Checking link statuses...\n");
1780         fflush(stdout);
1781         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1782                 all_ports_up = 1;
1783                 RTE_ETH_FOREACH_DEV(portid) {
1784                         if ((port_mask & (1 << portid)) == 0)
1785                                 continue;
1786                         memset(&link, 0, sizeof(link));
1787                         rte_eth_link_get_nowait(portid, &link);
1788                         /* print link status if flag set */
1789                         if (print_flag == 1) {
1790                                 if (link.link_status)
1791                                         printf("Port %d Link Up - speed %u "
1792                                                 "Mbps - %s\n", (uint8_t)portid,
1793                                                 (unsigned)link.link_speed,
1794                                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1795                                                 ("full-duplex") : ("half-duplex"));
1796                                 else
1797                                         printf("Port %d Link Down\n",
1798                                                 (uint8_t)portid);
1799                                 continue;
1800                         }
1801                         /* clear all_ports_up flag if any link down */
1802                         if (link.link_status == ETH_LINK_DOWN) {
1803                                 all_ports_up = 0;
1804                                 break;
1805                         }
1806                 }
1807                 /* after finally printing all link status, get out */
1808                 if (print_flag == 1)
1809                         break;
1810
1811                 if (all_ports_up == 0) {
1812                         fflush(stdout);
1813                         rte_delay_ms(CHECK_INTERVAL);
1814                 }
1815
1816                 /* set the print_flag if all ports up or timeout */
1817                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1818                         print_flag = 1;
1819                 }
1820
1821                 if (lsc_interrupt)
1822                         break;
1823         }
1824 }
1825
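/*
 * Deferred handler for RTE_ETH_EVENT_INTR_RMV: stop and close the port,
 * then detach its underlying device.
 */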
1826 static void
1827 rmv_event_callback(void *arg)
1828 {
1829         struct rte_eth_dev *dev;
1830         uint8_t port_id = (intptr_t)arg;
1831
1832         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1833         dev = &rte_eth_devices[port_id];
1834
1835         stop_port(port_id);
1836         close_port(port_id);
1837         printf("removing device %s\n", dev->device->name);
1838         rte_eal_dev_detach(dev->device);
1839 }
1840
1841 /* This function is used by the interrupt thread */
1842 static int
1843 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
1844                   void *ret_param)
1845 {
1846         static const char * const event_desc[] = {
1847                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1848                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1849                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1850                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1851                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1852                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1853                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1854                 [RTE_ETH_EVENT_MAX] = NULL,
1855         };
1856
1857         RTE_SET_USED(param);
1858         RTE_SET_USED(ret_param);
1859
1860         if (type >= RTE_ETH_EVENT_MAX) {
1861                 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1862                         port_id, __func__, type);
1863                 fflush(stderr);
1864         } else if (event_print_mask & (UINT32_C(1) << type)) {
1865                 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1866                         event_desc[type]);
1867                 fflush(stdout);
1868         }
1869
1870         switch (type) {
1871         case RTE_ETH_EVENT_INTR_RMV:
1872                 if (rte_eal_alarm_set(100000,
1873                                 rmv_event_callback, (void *)(intptr_t)port_id))
1874                         fprintf(stderr, "Could not set up deferred device removal\n");
1875                 break;
1876         default:
1877                 break;
1878         }
1879         return 0;
1880 }
1881
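/*
 * Program the configured TX queue to statistics counter mappings for this
 * port. Return 0 on success or the error reported by the ethdev layer.
 */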
1882 static int
1883 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1884 {
1885         uint16_t i;
1886         int diag;
1887         uint8_t mapping_found = 0;
1888
1889         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1890                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1891                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1892                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1893                                         tx_queue_stats_mappings[i].queue_id,
1894                                         tx_queue_stats_mappings[i].stats_counter_id);
1895                         if (diag != 0)
1896                                 return diag;
1897                         mapping_found = 1;
1898                 }
1899         }
1900         if (mapping_found)
1901                 port->tx_queue_stats_mapping_enabled = 1;
1902         return 0;
1903 }
1904
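/* RX counterpart of set_tx_queue_stats_mapping_registers(). */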
1905 static int
1906 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1907 {
1908         uint16_t i;
1909         int diag;
1910         uint8_t mapping_found = 0;
1911
1912         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1913                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1914                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1915                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1916                                         rx_queue_stats_mappings[i].queue_id,
1917                                         rx_queue_stats_mappings[i].stats_counter_id);
1918                         if (diag != 0)
1919                                 return diag;
1920                         mapping_found = 1;
1921                 }
1922         }
1923         if (mapping_found)
1924                 port->rx_queue_stats_mapping_enabled = 1;
1925         return 0;
1926 }
1927
1928 static void
1929 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1930 {
1931         int diag = 0;
1932
1933         diag = set_tx_queue_stats_mapping_registers(pi, port);
1934         if (diag != 0) {
1935                 if (diag == -ENOTSUP) {
1936                         port->tx_queue_stats_mapping_enabled = 0;
1937                         printf("TX queue stats mapping not supported on port id=%d\n", pi);
1938                 }
1939                 else
1940                         rte_exit(EXIT_FAILURE,
1941                                         "set_tx_queue_stats_mapping_registers "
1942                                         "failed for port id=%d diag=%d\n",
1943                                         pi, diag);
1944         }
1945
1946         diag = set_rx_queue_stats_mapping_registers(pi, port);
1947         if (diag != 0) {
1948                 if (diag == -ENOTSUP) {
1949                         port->rx_queue_stats_mapping_enabled = 0;
1950                         printf("RX queue stats mapping not supported on port id=%d\n", pi);
1951                 }
1952                 else
1953                         rte_exit(EXIT_FAILURE,
1954                                         "set_rx_queue_stats_mapping_registers "
1955                                         "failed for port id=%d diag=%d\n",
1956                                         pi, diag);
1957         }
1958 }
1959
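/*
 * Initialize the port RX/TX queue configuration from the driver defaults,
 * then override any threshold or flag that was set on the command line.
 */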
1960 static void
1961 rxtx_port_config(struct rte_port *port)
1962 {
1963         port->rx_conf = port->dev_info.default_rxconf;
1964         port->tx_conf = port->dev_info.default_txconf;
1965
1966         /* Check if any RX/TX parameters have been passed */
1967         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1968                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1969
1970         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1971                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1972
1973         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1974                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1975
1976         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1977                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1978
1979         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1980                 port->rx_conf.rx_drop_en = rx_drop_en;
1981
1982         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1983                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1984
1985         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1986                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1987
1988         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1989                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1990
1991         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1992                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1993
1994         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1995                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1996
1997         if (txq_flags != RTE_PMD_PARAM_UNSET)
1998                 port->tx_conf.txq_flags = txq_flags;
1999 }
2000
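/*
 * Apply the global testpmd configuration (RX mode, flow director, RSS,
 * LSC/RMV interrupts, queue stats mappings) to every probed port.
 */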
2001 void
2002 init_port_config(void)
2003 {
2004         portid_t pid;
2005         struct rte_port *port;
2006
2007         RTE_ETH_FOREACH_DEV(pid) {
2008                 port = &ports[pid];
2009                 port->dev_conf.rxmode = rx_mode;
2010                 port->dev_conf.fdir_conf = fdir_conf;
2011                 if (nb_rxq > 1) {
2012                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2013                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2014                 } else {
2015                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2016                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2017                 }
2018
2019                 if (port->dcb_flag == 0) {
2020                 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2021                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2022                         else
2023                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2024                 }
2025
2026                 rxtx_port_config(port);
2027
2028                 rte_eth_macaddr_get(pid, &port->eth_addr);
2029
2030                 map_port_queue_stats_mapping_registers(pid, port);
2031 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2032                 rte_pmd_ixgbe_bypass_init(pid);
2033 #endif
2034
2035                 if (lsc_interrupt &&
2036                     (rte_eth_devices[pid].data->dev_flags &
2037                      RTE_ETH_DEV_INTR_LSC))
2038                         port->dev_conf.intr_conf.lsc = 1;
2039                 if (rmv_interrupt &&
2040                     (rte_eth_devices[pid].data->dev_flags &
2041                      RTE_ETH_DEV_INTR_RMV))
2042                         port->dev_conf.intr_conf.rmv = 1;
2043         }
2044 }
2045
2046 void set_port_slave_flag(portid_t slave_pid)
2047 {
2048         struct rte_port *port;
2049
2050         port = &ports[slave_pid];
2051         port->slave_flag = 1;
2052 }
2053
2054 void clear_port_slave_flag(portid_t slave_pid)
2055 {
2056         struct rte_port *port;
2057
2058         port = &ports[slave_pid];
2059         port->slave_flag = 0;
2060 }
2061
2062 uint8_t port_is_bonding_slave(portid_t slave_pid)
2063 {
2064         struct rte_port *port;
2065
2066         port = &ports[slave_pid];
2067         return port->slave_flag;
2068 }
2069
2070 const uint16_t vlan_tags[] = {
2071                 0,  1,  2,  3,  4,  5,  6,  7,
2072                 8,  9, 10, 11,  12, 13, 14, 15,
2073                 16, 17, 18, 19, 20, 21, 22, 23,
2074                 24, 25, 26, 27, 28, 29, 30, 31
2075 };
2076
2077 static int
2078 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2079                  enum dcb_mode_enable dcb_mode,
2080                  enum rte_eth_nb_tcs num_tcs,
2081                  uint8_t pfc_en)
2082 {
2083         uint8_t i;
2084
2085         /*
2086          * Builds up the correct configuration for dcb+vt based on the vlan tags array
2087          * given above, and the number of traffic classes available for use.
2088          */
2089         if (dcb_mode == DCB_VT_ENABLED) {
2090                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2091                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2092                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2093                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2094
2095                 /* VMDQ+DCB RX and TX configurations */
2096                 vmdq_rx_conf->enable_default_pool = 0;
2097                 vmdq_rx_conf->default_pool = 0;
2098                 vmdq_rx_conf->nb_queue_pools =
2099                         (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2100                 vmdq_tx_conf->nb_queue_pools =
2101                         (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2102
2103                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2104                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2105                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2106                         vmdq_rx_conf->pool_map[i].pools =
2107                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
2108                 }
2109                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2110                         vmdq_rx_conf->dcb_tc[i] = i;
2111                         vmdq_tx_conf->dcb_tc[i] = i;
2112                 }
2113
2114                 /* set DCB mode of RX and TX of multiple queues */
2115                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2116                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2117         } else {
2118                 struct rte_eth_dcb_rx_conf *rx_conf =
2119                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
2120                 struct rte_eth_dcb_tx_conf *tx_conf =
2121                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
2122
2123                 rx_conf->nb_tcs = num_tcs;
2124                 tx_conf->nb_tcs = num_tcs;
2125
2126                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2127                         rx_conf->dcb_tc[i] = i % num_tcs;
2128                         tx_conf->dcb_tc[i] = i % num_tcs;
2129                 }
2130                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2131                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2132                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2133         }
2134
2135         if (pfc_en)
2136                 eth_conf->dcb_capability_en =
2137                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2138         else
2139                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2140
2141         return 0;
2142 }
2143
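/*
 * Reconfigure a port for DCB, optionally combined with VT and priority
 * flow control: build the rte_eth_conf, adjust the global RX/TX queue
 * counts and enable VLAN filtering for the test VLAN tags.
 * Return 0 on success, a negative value otherwise.
 */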
2144 int
2145 init_port_dcb_config(portid_t pid,
2146                      enum dcb_mode_enable dcb_mode,
2147                      enum rte_eth_nb_tcs num_tcs,
2148                      uint8_t pfc_en)
2149 {
2150         struct rte_eth_conf port_conf;
2151         struct rte_port *rte_port;
2152         int retval;
2153         uint16_t i;
2154
2155         rte_port = &ports[pid];
2156
2157         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2158         /* Enter DCB configuration status */
2159         dcb_config = 1;
2160
2161         /* Set configuration of DCB in VT mode and DCB in non-VT mode */
2162         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2163         if (retval < 0)
2164                 return retval;
2165         port_conf.rxmode.hw_vlan_filter = 1;
2166
2167         /**
2168          * Write the configuration into the device.
2169          * Set the numbers of RX & TX queues to 0, so
2170          * the RX & TX queues will not be setup.
2171          */
2172         (void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2173
2174         rte_eth_dev_info_get(pid, &rte_port->dev_info);
2175
2176         /* If dev_info.vmdq_pool_base is greater than 0,
2177          * the queue id of vmdq pools is started after pf queues.
2178          */
2179         if (dcb_mode == DCB_VT_ENABLED &&
2180             rte_port->dev_info.vmdq_pool_base > 0) {
2181                 printf("VMDQ_DCB multi-queue mode is nonsensical"
2182                         " for port %d.\n", pid);
2183                 return -1;
2184         }
2185
2186         /* Assume the ports in testpmd have the same dcb capability
2187          * and have the same number of rxq and txq in dcb mode
2188          */
2189         if (dcb_mode == DCB_VT_ENABLED) {
2190                 if (rte_port->dev_info.max_vfs > 0) {
2191                         nb_rxq = rte_port->dev_info.nb_rx_queues;
2192                         nb_txq = rte_port->dev_info.nb_tx_queues;
2193                 } else {
2194                         nb_rxq = rte_port->dev_info.max_rx_queues;
2195                         nb_txq = rte_port->dev_info.max_tx_queues;
2196                 }
2197         } else {
2198                 /* If VT is disabled, use all PF queues */
2199                 if (rte_port->dev_info.vmdq_pool_base == 0) {
2200                         nb_rxq = rte_port->dev_info.max_rx_queues;
2201                         nb_txq = rte_port->dev_info.max_tx_queues;
2202                 } else {
2203                         nb_rxq = (queueid_t)num_tcs;
2204                         nb_txq = (queueid_t)num_tcs;
2205
2206                 }
2207         }
2208         rx_free_thresh = 64;
2209
2210         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2211
2212         rxtx_port_config(rte_port);
2213         /* VLAN filter */
2214         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2215         for (i = 0; i < RTE_DIM(vlan_tags); i++)
2216                 rx_vft_set(pid, vlan_tags[i], 1);
2217
2218         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2219         map_port_queue_stats_mapping_registers(pid, rte_port);
2220
2221         rte_port->dcb_flag = 1;
2222
2223         return 0;
2224 }
2225
2226 static void
2227 init_port(void)
2228 {
2229         /* Configuration of Ethernet ports. */
2230         ports = rte_zmalloc("testpmd: ports",
2231                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2232                             RTE_CACHE_LINE_SIZE);
2233         if (ports == NULL) {
2234                 rte_exit(EXIT_FAILURE,
2235                                 "rte_zmalloc(%d struct rte_port) failed\n",
2236                                 RTE_MAX_ETHPORTS);
2237         }
2238 }
2239
2240 static void
2241 force_quit(void)
2242 {
2243         pmd_test_exit();
2244         prompt_exit();
2245 }
2246
2247 static void
2248 print_stats(void)
2249 {
2250         uint8_t i;
2251         const char clr[] = { 27, '[', '2', 'J', '\0' };
2252         const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2253
2254         /* Clear screen and move to top left */
2255         printf("%s%s", clr, top_left);
2256
2257         printf("\nPort statistics ====================================");
2258         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2259                 nic_stats_display(fwd_ports_ids[i]);
2260 }
2261
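/*
 * SIGINT/SIGTERM handler: release the packet capture and latency stats
 * resources, shut down all ports and re-raise the signal so the process
 * exits with the expected status.
 */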
2262 static void
2263 signal_handler(int signum)
2264 {
2265         if (signum == SIGINT || signum == SIGTERM) {
2266                 printf("\nSignal %d received, preparing to exit...\n",
2267                                 signum);
2268 #ifdef RTE_LIBRTE_PDUMP
2269                 /* uninitialize packet capture framework */
2270                 rte_pdump_uninit();
2271 #endif
2272 #ifdef RTE_LIBRTE_LATENCY_STATS
2273                 rte_latencystats_uninit();
2274 #endif
2275                 force_quit();
2276                 /* exit with the expected status */
2277                 signal(signum, SIG_DFL);
2278                 kill(getpid(), signum);
2279         }
2280 }
2281
2282 int
2283 main(int argc, char** argv)
2284 {
2285         int  diag;
2286         uint8_t port_id;
2287
2288         signal(SIGINT, signal_handler);
2289         signal(SIGTERM, signal_handler);
2290
2291         diag = rte_eal_init(argc, argv);
2292         if (diag < 0)
2293                 rte_panic("Cannot init EAL\n");
2294
2295 #ifdef RTE_LIBRTE_PDUMP
2296         /* initialize packet capture framework */
2297         rte_pdump_init(NULL);
2298 #endif
2299
2300         nb_ports = (portid_t) rte_eth_dev_count();
2301         if (nb_ports == 0)
2302                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2303
2304         /* allocate port structures, and init them */
2305         init_port();
2306
2307         set_def_fwd_config();
2308         if (nb_lcores == 0)
2309                 rte_panic("Empty set of forwarding logical cores - check the "
2310                           "core mask supplied in the command parameters\n");
2311
2312         /* Bitrate/latency stats disabled by default */
2313 #ifdef RTE_LIBRTE_BITRATE
2314         bitrate_enabled = 0;
2315 #endif
2316 #ifdef RTE_LIBRTE_LATENCY_STATS
2317         latencystats_enabled = 0;
2318 #endif
2319
2320         argc -= diag;
2321         argv += diag;
2322         if (argc > 1)
2323                 launch_args_parse(argc, argv);
2324
2325         if (tx_first && interactive)
2326                 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2327                                 "interactive mode.\n");
2328         if (!nb_rxq && !nb_txq)
2329                 printf("Warning: at least one of rx and tx queues should be non-zero\n");
2330
2331         if (nb_rxq > 1 && nb_rxq > nb_txq)
2332                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2333                        "but nb_txq=%d will prevent it from being fully tested.\n",
2334                        nb_rxq, nb_txq);
2335
2336         init_config();
2337         if (start_port(RTE_PORT_ALL) != 0)
2338                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2339
2340         /* set all ports to promiscuous mode by default */
2341         RTE_ETH_FOREACH_DEV(port_id)
2342                 rte_eth_promiscuous_enable(port_id);
2343
2344         /* Init metrics library */
2345         rte_metrics_init(rte_socket_id());
2346
2347 #ifdef RTE_LIBRTE_LATENCY_STATS
2348         if (latencystats_enabled != 0) {
2349                 int ret = rte_latencystats_init(1, NULL);
2350                 if (ret)
2351                         printf("Warning: latencystats init()"
2352                                 " returned error %d\n", ret);
2353                 printf("Latencystats running on lcore %d\n",
2354                         latencystats_lcore_id);
2355         }
2356 #endif
2357
2358         /* Setup bitrate stats */
2359 #ifdef RTE_LIBRTE_BITRATE
2360         if (bitrate_enabled != 0) {
2361                 bitrate_data = rte_stats_bitrate_create();
2362                 if (bitrate_data == NULL)
2363                         rte_exit(EXIT_FAILURE,
2364                                 "Could not allocate bitrate data.\n");
2365                 rte_stats_bitrate_reg(bitrate_data);
2366         }
2367 #endif
2368
2369 #ifdef RTE_LIBRTE_CMDLINE
2370         if (strlen(cmdline_filename) != 0)
2371                 cmdline_read_from_file(cmdline_filename);
2372
2373         if (interactive == 1) {
2374                 if (auto_start) {
2375                         printf("Start automatic packet forwarding\n");
2376                         start_packet_forwarding(0);
2377                 }
2378                 prompt();
2379                 pmd_test_exit();
2380         } else
2381 #endif
2382         {
2383                 char c;
2384                 int rc;
2385
2386                 printf("No commandline core given, start packet forwarding\n");
2387                 start_packet_forwarding(tx_first);
2388                 if (stats_period != 0) {
2389                         uint64_t prev_time = 0, cur_time, diff_time = 0;
2390                         uint64_t timer_period;
2391
2392                         /* Convert to number of cycles */
2393                         timer_period = stats_period * rte_get_timer_hz();
2394
2395                         while (1) {
2396                                 cur_time = rte_get_timer_cycles();
2397                                 diff_time += cur_time - prev_time;
2398
2399                                 if (diff_time >= timer_period) {
2400                                         print_stats();
2401                                         /* Reset the timer */
2402                                         diff_time = 0;
2403                                 }
2404                                 /* Sleep to avoid unnecessary checks */
2405                                 prev_time = cur_time;
2406                                 sleep(1);
2407                         }
2408                 }
2409
2410                 printf("Press enter to exit\n");
2411                 rc = read(0, &c, 1);
2412                 pmd_test_exit();
2413                 if (rc < 0)
2414                         return 1;
2415         }
2416
2417         return 0;
2418 }