1344016032196c96b348b528fd6b62fc994ac451
[dpdk.git] / app / test-pmd / testpmd.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <errno.h>
15 #include <stdbool.h>
16
17 #include <sys/queue.h>
18 #include <sys/stat.h>
19
20 #include <stdint.h>
21 #include <unistd.h>
22 #include <inttypes.h>
23
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
27 #include <rte_log.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_memory.h>
31 #include <rte_memcpy.h>
32 #include <rte_launch.h>
33 #include <rte_eal.h>
34 #include <rte_alarm.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_atomic.h>
38 #include <rte_branch_prediction.h>
39 #include <rte_mempool.h>
40 #include <rte_malloc.h>
41 #include <rte_mbuf.h>
42 #include <rte_mbuf_pool_ops.h>
43 #include <rte_interrupts.h>
44 #include <rte_pci.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_dev.h>
48 #include <rte_string_fns.h>
49 #ifdef RTE_LIBRTE_IXGBE_PMD
50 #include <rte_pmd_ixgbe.h>
51 #endif
52 #ifdef RTE_LIBRTE_PDUMP
53 #include <rte_pdump.h>
54 #endif
55 #include <rte_flow.h>
56 #include <rte_metrics.h>
57 #ifdef RTE_LIBRTE_BITRATE
58 #include <rte_bitrate.h>
59 #endif
60 #ifdef RTE_LIBRTE_LATENCY_STATS
61 #include <rte_latencystats.h>
62 #endif
63
64 #include "testpmd.h"
65
66 uint16_t verbose_level = 0; /**< Silent by default. */
67 int testpmd_logtype; /**< Log type for testpmd logs */
68
69 /* use master core for command line ? */
70 uint8_t interactive = 0;
71 uint8_t auto_start = 0;
72 uint8_t tx_first;
73 char cmdline_filename[PATH_MAX] = {0};
74
75 /*
76  * NUMA support configuration.
77  * When set, the NUMA support attempts to dispatch the allocation of the
78  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
79  * probed ports among the CPU sockets 0 and 1.
80  * Otherwise, all memory is allocated from CPU socket 0.
81  */
82 uint8_t numa_support = 1; /**< numa enabled by default */
83
84 /*
85  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
86  * not configured.
87  */
88 uint8_t socket_num = UMA_NO_CONFIG;
89
90 /*
91  * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
92  */
93 uint8_t mp_anon = 0;
94
95 /*
96  * Store specified sockets on which memory pool to be used by ports
97  * is allocated.
98  */
99 uint8_t port_numa[RTE_MAX_ETHPORTS];
100
101 /*
102  * Store specified sockets on which RX ring to be used by ports
103  * is allocated.
104  */
105 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
106
107 /*
108  * Store specified sockets on which TX ring to be used by ports
109  * is allocated.
110  */
111 uint8_t txring_numa[RTE_MAX_ETHPORTS];
112
113 /*
114  * Record the Ethernet address of peer target ports to which packets are
115  * forwarded.
116  * Must be instantiated with the ethernet addresses of peer traffic generator
117  * ports.
118  */
119 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
120 portid_t nb_peer_eth_addrs = 0;
121
122 /*
123  * Probed Target Environment.
124  */
125 struct rte_port *ports;        /**< For all probed ethernet ports. */
126 portid_t nb_ports;             /**< Number of probed ethernet ports. */
127 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
128 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
129
130 /*
131  * Test Forwarding Configuration.
132  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
133  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
134  */
135 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
136 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
137 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
138 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
139
140 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
141 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
142
143 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
144 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
145
146 /*
147  * Forwarding engines.
148  */
149 struct fwd_engine * fwd_engines[] = {
150         &io_fwd_engine,
151         &mac_fwd_engine,
152         &mac_swap_engine,
153         &flow_gen_engine,
154         &rx_only_engine,
155         &tx_only_engine,
156         &csum_fwd_engine,
157         &icmp_echo_engine,
158 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
159         &softnic_tm_engine,
160         &softnic_tm_bypass_engine,
161 #endif
162 #ifdef RTE_LIBRTE_IEEE1588
163         &ieee1588_fwd_engine,
164 #endif
165         NULL,
166 };
167
168 struct fwd_config cur_fwd_config;
169 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
170 uint32_t retry_enabled;
171 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
172 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
173
174 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
175 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
176                                       * specified on command-line. */
177 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
178
179 /*
180  * In a container, the process running with the 'stats-period' option cannot
181  * be terminated normally. Set the flag to exit the stats period loop on SIGINT/SIGTERM.
182  */
183 uint8_t f_quit;
184
185 /*
186  * Configuration of packet segments used by the "txonly" processing engine.
187  */
188 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
189 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
190         TXONLY_DEF_PACKET_LEN,
191 };
192 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
193
194 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
195 /**< Split policy for packets to TX. */
196
197 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
198 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
199
200 /* whether the current configuration is in DCB mode; 0 means not in DCB mode */
201 uint8_t dcb_config = 0;
202
203 /* Whether the dcb is in testing status */
204 uint8_t dcb_test = 0;
205
206 /*
207  * Configurable number of RX/TX queues.
208  */
209 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
210 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
211
212 /*
213  * Configurable number of RX/TX ring descriptors.
214  * Defaults are supplied by drivers via ethdev.
215  */
216 #define RTE_TEST_RX_DESC_DEFAULT 0
217 #define RTE_TEST_TX_DESC_DEFAULT 0
218 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
219 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
220
221 #define RTE_PMD_PARAM_UNSET -1
222 /*
223  * Configurable values of RX and TX ring threshold registers.
224  */
225
226 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
228 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
229
230 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
232 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
233
234 /*
235  * Configurable value of RX free threshold.
236  */
237 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
238
239 /*
240  * Configurable value of RX drop enable.
241  */
242 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
243
244 /*
245  * Configurable value of TX free threshold.
246  */
247 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
248
249 /*
250  * Configurable value of TX RS bit threshold.
251  */
252 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
253
254 /*
255  * Receive Side Scaling (RSS) configuration.
256  */
257 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
258
259 /*
260  * Port topology configuration
261  */
262 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
263
264 /*
265  * Avoid flushing all the RX streams before starting forwarding.
266  */
267 uint8_t no_flush_rx = 0; /* flush by default */
268
269 /*
270  * Flow API isolated mode.
271  */
272 uint8_t flow_isolate_all;
273
274 /*
275  * Avoid checking link status when starting/stopping a port.
276  */
277 uint8_t no_link_check = 0; /* check by default */
278
279 /*
280  * Enable link status change notification
281  */
282 uint8_t lsc_interrupt = 1; /* enabled by default */
283
284 /*
285  * Enable device removal notification.
286  */
287 uint8_t rmv_interrupt = 1; /* enabled by default */
288
289 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
290
291 /*
292  * Display or mask ether events
293  * Default to all events except VF_MBOX
294  */
295 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
296                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
297                             (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
298                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
299                             (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
300                             (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
301                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
302 /*
303  * Decide if all memory are locked for performance.
304  */
305 int do_mlockall = 0;
306
307 /*
308  * NIC bypass mode configuration options.
309  */
310
311 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
312 /* The NIC bypass watchdog timeout. */
313 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
314 #endif
315
316
317 #ifdef RTE_LIBRTE_LATENCY_STATS
318
319 /*
320  * Set when latency stats is enabled in the commandline
321  */
322 uint8_t latencystats_enabled;
323
324 /*
325  * Lcore ID to serve latency statistics.
326  */
327 lcoreid_t latencystats_lcore_id = -1;
328
329 #endif
330
331 /*
332  * Ethernet device configuration.
333  */
334 struct rte_eth_rxmode rx_mode = {
335         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
336         .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
337         .ignore_offload_bitfield = 1,
338 };
339
340 struct rte_eth_txmode tx_mode = {
341         .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
342 };
343
344 struct rte_fdir_conf fdir_conf = {
345         .mode = RTE_FDIR_MODE_NONE,
346         .pballoc = RTE_FDIR_PBALLOC_64K,
347         .status = RTE_FDIR_REPORT_STATUS,
348         .mask = {
349                 .vlan_tci_mask = 0x0,
350                 .ipv4_mask     = {
351                         .src_ip = 0xFFFFFFFF,
352                         .dst_ip = 0xFFFFFFFF,
353                 },
354                 .ipv6_mask     = {
355                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
356                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
357                 },
358                 .src_port_mask = 0xFFFF,
359                 .dst_port_mask = 0xFFFF,
360                 .mac_addr_byte_mask = 0xFF,
361                 .tunnel_type_mask = 1,
362                 .tunnel_id_mask = 0xFFFFFFFF,
363         },
364         .drop_queue = 127,
365 };
366
367 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
368
369 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
370 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
371
372 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
373 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
374
375 uint16_t nb_tx_queue_stats_mappings = 0;
376 uint16_t nb_rx_queue_stats_mappings = 0;
377
378 /*
379  * Display zero values by default for xstats
380  */
381 uint8_t xstats_hide_zero;
382
383 unsigned int num_sockets = 0;
384 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
385
386 #ifdef RTE_LIBRTE_BITRATE
387 /* Bitrate statistics */
388 struct rte_stats_bitrates *bitrate_data;
389 lcoreid_t bitrate_lcore_id;
390 uint8_t bitrate_enabled;
391 #endif
392
393 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
394 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
395
396 /* Forward function declarations */
397 static void map_port_queue_stats_mapping_registers(portid_t pi,
398                                                    struct rte_port *port);
399 static void check_all_ports_link_status(uint32_t port_mask);
400 static int eth_event_callback(portid_t port_id,
401                               enum rte_eth_event_type type,
402                               void *param, void *ret_param);
403 static void eth_dev_event_callback(char *device_name,
404                                 enum rte_dev_event_type type,
405                                 void *param);
406 static int eth_dev_event_callback_register(void);
407 static int eth_dev_event_callback_unregister(void);
408
409
410 /*
411  * Check if all the ports are started.
412  * If yes, return positive value. If not, return zero.
413  */
414 static int all_ports_started(void);
415
416 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
417 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
418
419 /*
420  * Helper function to check if socket is already discovered.
421  * If yes, return positive value. If not, return zero.
422  */
423 int
424 new_socket_id(unsigned int socket_id)
425 {
426         unsigned int i;
427
428         for (i = 0; i < num_sockets; i++) {
429                 if (socket_ids[i] == socket_id)
430                         return 0;
431         }
432         return 1;
433 }
434
435 /*
436  * Setup default configuration.
437  */
438 static void
439 set_default_fwd_lcores_config(void)
440 {
441         unsigned int i;
442         unsigned int nb_lc;
443         unsigned int sock_num;
444
445         nb_lc = 0;
446         for (i = 0; i < RTE_MAX_LCORE; i++) {
447                 sock_num = rte_lcore_to_socket_id(i);
448                 if (new_socket_id(sock_num)) {
449                         if (num_sockets >= RTE_MAX_NUMA_NODES) {
450                                 rte_exit(EXIT_FAILURE,
451                                          "Total sockets greater than %u\n",
452                                          RTE_MAX_NUMA_NODES);
453                         }
454                         socket_ids[num_sockets++] = sock_num;
455                 }
456                 if (!rte_lcore_is_enabled(i))
457                         continue;
458                 if (i == rte_get_master_lcore())
459                         continue;
460                 fwd_lcores_cpuids[nb_lc++] = i;
461         }
462         nb_lcores = (lcoreid_t) nb_lc;
463         nb_cfg_lcores = nb_lcores;
464         nb_fwd_lcores = 1;
465 }
466
467 static void
468 set_def_peer_eth_addrs(void)
469 {
470         portid_t i;
471
472         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
473                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
474                 peer_eth_addrs[i].addr_bytes[5] = i;
475         }
476 }
477
478 static void
479 set_default_fwd_ports_config(void)
480 {
481         portid_t pt_id;
482         int i = 0;
483
484         RTE_ETH_FOREACH_DEV(pt_id)
485                 fwd_ports_ids[i++] = pt_id;
486
487         nb_cfg_ports = nb_ports;
488         nb_fwd_ports = nb_ports;
489 }
490
/*
 * Reset the forwarding configuration to its defaults:
 * lcore set, peer Ethernet addresses and forwarding port set.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
498
499 /*
500  * Configuration initialisation done once at init time.
501  */
502 static void
503 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
504                  unsigned int socket_id)
505 {
506         char pool_name[RTE_MEMPOOL_NAMESIZE];
507         struct rte_mempool *rte_mp = NULL;
508         uint32_t mb_size;
509
510         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
511         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
512
513         TESTPMD_LOG(INFO,
514                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
515                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
516
517         if (mp_anon != 0) {
518                 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
519                         mb_size, (unsigned) mb_mempool_cache,
520                         sizeof(struct rte_pktmbuf_pool_private),
521                         socket_id, 0);
522                 if (rte_mp == NULL)
523                         goto err;
524
525                 if (rte_mempool_populate_anon(rte_mp) == 0) {
526                         rte_mempool_free(rte_mp);
527                         rte_mp = NULL;
528                         goto err;
529                 }
530                 rte_pktmbuf_pool_init(rte_mp, NULL);
531                 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
532         } else {
533                 /* wrapper to rte_mempool_create() */
534                 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
535                                 rte_mbuf_best_mempool_ops());
536                 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
537                         mb_mempool_cache, 0, mbuf_seg_size, socket_id);
538         }
539
540 err:
541         if (rte_mp == NULL) {
542                 rte_exit(EXIT_FAILURE,
543                         "Creation of mbuf pool for socket %u failed: %s\n",
544                         socket_id, rte_strerror(rte_errno));
545         } else if (verbose_level > 0) {
546                 rte_mempool_dump(stdout, rte_mp);
547         }
548 }
549
550 /*
551  * Check given socket id is valid or not with NUMA mode,
552  * if valid, return 0, else return -1
553  */
554 static int
555 check_socket_id(const unsigned int socket_id)
556 {
557         static int warning_once = 0;
558
559         if (new_socket_id(socket_id)) {
560                 if (!warning_once && numa_support)
561                         printf("Warning: NUMA should be configured manually by"
562                                " using --port-numa-config and"
563                                " --ring-numa-config parameters along with"
564                                " --numa.\n");
565                 warning_once = 1;
566                 return -1;
567         }
568         return 0;
569 }
570
571 /*
572  * Get the allowed maximum number of RX queues.
573  * *pid return the port id which has minimal value of
574  * max_rx_queues in all ports.
575  */
576 queueid_t
577 get_allowed_max_nb_rxq(portid_t *pid)
578 {
579         queueid_t allowed_max_rxq = MAX_QUEUE_ID;
580         portid_t pi;
581         struct rte_eth_dev_info dev_info;
582
583         RTE_ETH_FOREACH_DEV(pi) {
584                 rte_eth_dev_info_get(pi, &dev_info);
585                 if (dev_info.max_rx_queues < allowed_max_rxq) {
586                         allowed_max_rxq = dev_info.max_rx_queues;
587                         *pid = pi;
588                 }
589         }
590         return allowed_max_rxq;
591 }
592
593 /*
594  * Check input rxq is valid or not.
595  * If input rxq is not greater than any of maximum number
596  * of RX queues of all ports, it is valid.
597  * if valid, return 0, else return -1
598  */
599 int
600 check_nb_rxq(queueid_t rxq)
601 {
602         queueid_t allowed_max_rxq;
603         portid_t pid = 0;
604
605         allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
606         if (rxq > allowed_max_rxq) {
607                 printf("Fail: input rxq (%u) can't be greater "
608                        "than max_rx_queues (%u) of port %u\n",
609                        rxq,
610                        allowed_max_rxq,
611                        pid);
612                 return -1;
613         }
614         return 0;
615 }
616
617 /*
618  * Get the allowed maximum number of TX queues.
619  * *pid return the port id which has minimal value of
620  * max_tx_queues in all ports.
621  */
622 queueid_t
623 get_allowed_max_nb_txq(portid_t *pid)
624 {
625         queueid_t allowed_max_txq = MAX_QUEUE_ID;
626         portid_t pi;
627         struct rte_eth_dev_info dev_info;
628
629         RTE_ETH_FOREACH_DEV(pi) {
630                 rte_eth_dev_info_get(pi, &dev_info);
631                 if (dev_info.max_tx_queues < allowed_max_txq) {
632                         allowed_max_txq = dev_info.max_tx_queues;
633                         *pid = pi;
634                 }
635         }
636         return allowed_max_txq;
637 }
638
639 /*
640  * Check input txq is valid or not.
641  * If input txq is not greater than any of maximum number
642  * of TX queues of all ports, it is valid.
643  * if valid, return 0, else return -1
644  */
645 int
646 check_nb_txq(queueid_t txq)
647 {
648         queueid_t allowed_max_txq;
649         portid_t pid = 0;
650
651         allowed_max_txq = get_allowed_max_nb_txq(&pid);
652         if (txq > allowed_max_txq) {
653                 printf("Fail: input txq (%u) can't be greater "
654                        "than max_tx_queues (%u) of port %u\n",
655                        txq,
656                        allowed_max_txq,
657                        pid);
658                 return -1;
659         }
660         return 0;
661 }
662
/*
 * One-time initialization of the global testpmd configuration:
 * forwarding lcore structures, default per-port Tx/Rx configuration,
 * mbuf pools (one per socket in NUMA mode), forwarding streams and
 * per-lcore GSO/GRO contexts. Exits the application on any failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	int k;

	/* One byte per NUMA node (the array element type is uint8_t). */
	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);

	/* Default all per-port and per-ring NUMA settings to "unset". */
	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);

		/* Drop default offloads the device does not support. */
		if (!(port->dev_info.rx_offload_capa &
					DEV_RX_OFFLOAD_CRC_STRIP))
			port->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_CRC_STRIP;
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		/* Count ports per socket to know where pools are needed. */
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* Size for worst-case descriptor usage plus cache/burst. */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		/* One mbuf pool per discovered CPU socket. */
		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* Fall back to the socket-0 pool when none exists locally. */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}
820
821
822 void
823 reconfig(portid_t new_port_id, unsigned socket_id)
824 {
825         struct rte_port *port;
826
827         /* Reconfiguration of Ethernet ports. */
828         port = &ports[new_port_id];
829         rte_eth_dev_info_get(new_port_id, &port->dev_info);
830
831         /* set flag to initialize port/queue */
832         port->need_reconfig = 1;
833         port->need_reconfig_queues = 1;
834         port->socket_id = socket_id;
835
836         init_port_config();
837 }
838
839
/*
 * (Re)allocate the array of forwarding streams: one stream per
 * (port, queue) pair where the queue count is max(nb_rxq, nb_txq).
 * Also resolves the NUMA socket id of every port.
 * Returns 0 on success, -1 when a queue count exceeds a port's
 * capability or no queue is configured; exits on allocation failure.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Reject queue counts beyond what the device supports. */
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	/* Nothing to do when the stream count is unchanged. */
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
925
926 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
927 static void
928 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
929 {
930         unsigned int total_burst;
931         unsigned int nb_burst;
932         unsigned int burst_stats[3];
933         uint16_t pktnb_stats[3];
934         uint16_t nb_pkt;
935         int burst_percent[3];
936
937         /*
938          * First compute the total number of packet bursts and the
939          * two highest numbers of bursts of the same number of packets.
940          */
941         total_burst = 0;
942         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
943         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
944         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
945                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
946                 if (nb_burst == 0)
947                         continue;
948                 total_burst += nb_burst;
949                 if (nb_burst > burst_stats[0]) {
950                         burst_stats[1] = burst_stats[0];
951                         pktnb_stats[1] = pktnb_stats[0];
952                         burst_stats[0] = nb_burst;
953                         pktnb_stats[0] = nb_pkt;
954                 }
955         }
956         if (total_burst == 0)
957                 return;
958         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
959         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
960                burst_percent[0], (int) pktnb_stats[0]);
961         if (burst_stats[0] == total_burst) {
962                 printf("]\n");
963                 return;
964         }
965         if (burst_stats[0] + burst_stats[1] == total_burst) {
966                 printf(" + %d%% of %d pkts]\n",
967                        100 - burst_percent[0], pktnb_stats[1]);
968                 return;
969         }
970         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
971         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
972         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
973                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
974                 return;
975         }
976         printf(" + %d%% of %d pkts + %d%% of others]\n",
977                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
978 }
979 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
980
/*
 * Display the forward statistics of one port: RX/TX packet counters,
 * drops, errors and - when queue stats mapping is enabled - the
 * per-stats-register queue counters.  Two layouts are used depending on
 * whether any queue stats mapping is active on the port.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		/* compact layout: no queue stats mapping on this port */
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		/* checksum counters are only meaningful in csum fwd mode */
		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		/* wide layout: columns line up with per-queue lines below */
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/* burst-size histograms, when compiled in */
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	/* per-stats-register queue counters, one line per register */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
1063
1064 static void
1065 fwd_stream_stats_display(streamid_t stream_id)
1066 {
1067         struct fwd_stream *fs;
1068         static const char *fwd_top_stats_border = "-------";
1069
1070         fs = fwd_streams[stream_id];
1071         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1072             (fs->fwd_dropped == 0))
1073                 return;
1074         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1075                "TX Port=%2d/Queue=%2d %s\n",
1076                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1077                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1078         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1079                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1080
1081         /* if checksum mode */
1082         if (cur_fwd_eng == &csum_fwd_engine) {
1083                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
1084                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1085         }
1086
1087 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1088         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1089         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1090 #endif
1091 }
1092
1093 static void
1094 flush_fwd_rx_queues(void)
1095 {
1096         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1097         portid_t  rxp;
1098         portid_t port_id;
1099         queueid_t rxq;
1100         uint16_t  nb_rx;
1101         uint16_t  i;
1102         uint8_t   j;
1103         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1104         uint64_t timer_period;
1105
1106         /* convert to number of cycles */
1107         timer_period = rte_get_timer_hz(); /* 1 second timeout */
1108
1109         for (j = 0; j < 2; j++) {
1110                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1111                         for (rxq = 0; rxq < nb_rxq; rxq++) {
1112                                 port_id = fwd_ports_ids[rxp];
1113                                 /**
1114                                 * testpmd can stuck in the below do while loop
1115                                 * if rte_eth_rx_burst() always returns nonzero
1116                                 * packets. So timer is added to exit this loop
1117                                 * after 1sec timer expiry.
1118                                 */
1119                                 prev_tsc = rte_rdtsc();
1120                                 do {
1121                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
1122                                                 pkts_burst, MAX_PKT_BURST);
1123                                         for (i = 0; i < nb_rx; i++)
1124                                                 rte_pktmbuf_free(pkts_burst[i]);
1125
1126                                         cur_tsc = rte_rdtsc();
1127                                         diff_tsc = cur_tsc - prev_tsc;
1128                                         timer_tsc += diff_tsc;
1129                                 } while ((nb_rx > 0) &&
1130                                         (timer_tsc < timer_period));
1131                                 timer_tsc = 0;
1132                         }
1133                 }
1134                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1135         }
1136 }
1137
/*
 * Forwarding loop executed on one lcore: repeatedly invoke pkt_fwd on
 * every stream assigned to this lcore until fc->stopped is set.  When
 * enabled, also drives periodic bitrate and latency statistics from the
 * lcores designated for those tasks.
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t idx_port;

	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* slice of the global stream array owned by this lcore */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		/* bitrate stats are computed once per second, on one lcore */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				RTE_ETH_FOREACH_DEV(idx_port)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		/* latency stats are refreshed from one designated lcore */
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
1179
1180 static int
1181 start_pkt_forward_on_core(void *fwd_arg)
1182 {
1183         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1184                              cur_fwd_config.fwd_eng->packet_fwd);
1185         return 0;
1186 }
1187
1188 /*
1189  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1190  * Used to start communication flows in network loopback test configurations.
1191  */
1192 static int
1193 run_one_txonly_burst_on_core(void *fwd_arg)
1194 {
1195         struct fwd_lcore *fwd_lc;
1196         struct fwd_lcore tmp_lcore;
1197
1198         fwd_lc = (struct fwd_lcore *) fwd_arg;
1199         tmp_lcore = *fwd_lc;
1200         tmp_lcore.stopped = 1;
1201         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1202         return 0;
1203 }
1204
1205 /*
1206  * Launch packet forwarding:
1207  *     - Setup per-port forwarding context.
1208  *     - launch logical cores with their forwarding configuration.
1209  */
1210 static void
1211 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1212 {
1213         port_fwd_begin_t port_fwd_begin;
1214         unsigned int i;
1215         unsigned int lc_id;
1216         int diag;
1217
1218         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1219         if (port_fwd_begin != NULL) {
1220                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1221                         (*port_fwd_begin)(fwd_ports_ids[i]);
1222         }
1223         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1224                 lc_id = fwd_lcores_cpuids[i];
1225                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1226                         fwd_lcores[i]->stopped = 0;
1227                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1228                                                      fwd_lcores[i], lc_id);
1229                         if (diag != 0)
1230                                 printf("launch lcore %u failed - diag=%d\n",
1231                                        lc_id, diag);
1232                 }
1233         }
1234 }
1235
1236 /*
1237  * Update the forward ports list.
1238  */
1239 void
1240 update_fwd_ports(portid_t new_pid)
1241 {
1242         unsigned int i;
1243         unsigned int new_nb_fwd_ports = 0;
1244         int move = 0;
1245
1246         for (i = 0; i < nb_fwd_ports; ++i) {
1247                 if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
1248                         move = 1;
1249                 else if (move)
1250                         fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
1251                 else
1252                         new_nb_fwd_ports++;
1253         }
1254         if (new_pid < RTE_MAX_ETHPORTS)
1255                 fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
1256
1257         nb_fwd_ports = new_nb_fwd_ports;
1258         nb_cfg_ports = new_nb_fwd_ports;
1259 }
1260
1261 /*
1262  * Launch packet forwarding configuration.
1263  */
/*
 * Start packet forwarding with the current engine and configuration.
 * with_tx_first: number of initial TX-only bursts to send before the
 * real engine starts (0 to skip).  Returns early with a message if the
 * configuration cannot support forwarding or forwarding is already on.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	/* reject engine/queue-count combinations that cannot work */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done != 0 means no forwarding run is currently active */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	/* in DCB mode every fwd port must have the dcb_flag set and more
	 * than one forwarding core is required */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	/* drain stale packets left in RX queues unless disabled */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	/* snapshot current HW stats per port so the run's delta can be
	 * computed when forwarding stops */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* reset the per-stream software counters */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	/* optionally send TX-only burst(s) first, e.g. to kick off traffic
	 * in loopback test setups, waiting for each to finish */
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1367
/*
 * Stop packet forwarding: signal all forwarding lcores to stop, wait
 * for them, run the engine's per-port end callbacks, fold per-stream
 * counters into their ports, then display per-stream, per-port and
 * accumulated statistics for the run (as deltas vs. the snapshot taken
 * at start).  Sets test_done back to 1.
 */
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	/* let the engine tear down each forwarding port, if needed */
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	/* fold each stream's software counters into its RX/TX ports */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		/* with multiple streams per port, show per-stream stats and
		 * detach the port stream pointers; otherwise attach them */
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	/* per port: subtract the start-of-run snapshot so only this run's
	 * traffic is reported, and accumulate the overall totals */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
1505
1506 void
1507 dev_set_link_up(portid_t pid)
1508 {
1509         if (rte_eth_dev_set_link_up(pid) < 0)
1510                 printf("\nSet link up fail.\n");
1511 }
1512
1513 void
1514 dev_set_link_down(portid_t pid)
1515 {
1516         if (rte_eth_dev_set_link_down(pid) < 0)
1517                 printf("\nSet link down fail.\n");
1518 }
1519
1520 static int
1521 all_ports_started(void)
1522 {
1523         portid_t pi;
1524         struct rte_port *port;
1525
1526         RTE_ETH_FOREACH_DEV(pi) {
1527                 port = &ports[pi];
1528                 /* Check if there is a port which is not started */
1529                 if ((port->port_status != RTE_PORT_STARTED) &&
1530                         (port->slave_flag == 0))
1531                         return 0;
1532         }
1533
1534         /* No port is not started */
1535         return 1;
1536 }
1537
1538 int
1539 port_is_stopped(portid_t port_id)
1540 {
1541         struct rte_port *port = &ports[port_id];
1542
1543         if ((port->port_status != RTE_PORT_STOPPED) &&
1544             (port->slave_flag == 0))
1545                 return 0;
1546         return 1;
1547 }
1548
1549 int
1550 all_ports_stopped(void)
1551 {
1552         portid_t pi;
1553
1554         RTE_ETH_FOREACH_DEV(pi) {
1555                 if (!port_is_stopped(pi))
1556                         return 0;
1557         }
1558
1559         return 1;
1560 }
1561
1562 int
1563 port_is_started(portid_t port_id)
1564 {
1565         if (port_id_is_invalid(port_id, ENABLED_WARN))
1566                 return 0;
1567
1568         if (ports[port_id].port_status != RTE_PORT_STARTED)
1569                 return 0;
1570
1571         return 1;
1572 }
1573
1574 static int
1575 port_is_closed(portid_t port_id)
1576 {
1577         if (port_id_is_invalid(port_id, ENABLED_WARN))
1578                 return 0;
1579
1580         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1581                 return 0;
1582
1583         return 1;
1584 }
1585
1586 int
1587 start_port(portid_t pid)
1588 {
1589         int diag, need_check_link_status = -1;
1590         portid_t pi;
1591         queueid_t qi;
1592         struct rte_port *port;
1593         struct ether_addr mac_addr;
1594         enum rte_eth_event_type event_type;
1595
1596         if (port_id_is_invalid(pid, ENABLED_WARN))
1597                 return 0;
1598
1599         if(dcb_config)
1600                 dcb_test = 1;
1601         RTE_ETH_FOREACH_DEV(pi) {
1602                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1603                         continue;
1604
1605                 need_check_link_status = 0;
1606                 port = &ports[pi];
1607                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1608                                                  RTE_PORT_HANDLING) == 0) {
1609                         printf("Port %d is now not stopped\n", pi);
1610                         continue;
1611                 }
1612
1613                 if (port->need_reconfig > 0) {
1614                         port->need_reconfig = 0;
1615
1616                         if (flow_isolate_all) {
1617                                 int ret = port_flow_isolate(pi, 1);
1618                                 if (ret) {
1619                                         printf("Failed to apply isolated"
1620                                                " mode on port %d\n", pi);
1621                                         return -1;
1622                                 }
1623                         }
1624
1625                         printf("Configuring Port %d (socket %u)\n", pi,
1626                                         port->socket_id);
1627                         /* configure port */
1628                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1629                                                 &(port->dev_conf));
1630                         if (diag != 0) {
1631                                 if (rte_atomic16_cmpset(&(port->port_status),
1632                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1633                                         printf("Port %d can not be set back "
1634                                                         "to stopped\n", pi);
1635                                 printf("Fail to configure port %d\n", pi);
1636                                 /* try to reconfigure port next time */
1637                                 port->need_reconfig = 1;
1638                                 return -1;
1639                         }
1640                 }
1641                 if (port->need_reconfig_queues > 0) {
1642                         port->need_reconfig_queues = 0;
1643                         /* setup tx queues */
1644                         for (qi = 0; qi < nb_txq; qi++) {
1645                                 port->tx_conf[qi].txq_flags =
1646                                         ETH_TXQ_FLAGS_IGNORE;
1647                                 if ((numa_support) &&
1648                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1649                                         diag = rte_eth_tx_queue_setup(pi, qi,
1650                                                 port->nb_tx_desc[qi],
1651                                                 txring_numa[pi],
1652                                                 &(port->tx_conf[qi]));
1653                                 else
1654                                         diag = rte_eth_tx_queue_setup(pi, qi,
1655                                                 port->nb_tx_desc[qi],
1656                                                 port->socket_id,
1657                                                 &(port->tx_conf[qi]));
1658
1659                                 if (diag == 0)
1660                                         continue;
1661
1662                                 /* Fail to setup tx queue, return */
1663                                 if (rte_atomic16_cmpset(&(port->port_status),
1664                                                         RTE_PORT_HANDLING,
1665                                                         RTE_PORT_STOPPED) == 0)
1666                                         printf("Port %d can not be set back "
1667                                                         "to stopped\n", pi);
1668                                 printf("Fail to configure port %d tx queues\n",
1669                                        pi);
1670                                 /* try to reconfigure queues next time */
1671                                 port->need_reconfig_queues = 1;
1672                                 return -1;
1673                         }
1674                         for (qi = 0; qi < nb_rxq; qi++) {
1675                                 /* setup rx queues */
1676                                 if ((numa_support) &&
1677                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1678                                         struct rte_mempool * mp =
1679                                                 mbuf_pool_find(rxring_numa[pi]);
1680                                         if (mp == NULL) {
1681                                                 printf("Failed to setup RX queue:"
1682                                                         "No mempool allocation"
1683                                                         " on the socket %d\n",
1684                                                         rxring_numa[pi]);
1685                                                 return -1;
1686                                         }
1687
1688                                         diag = rte_eth_rx_queue_setup(pi, qi,
1689                                              port->nb_rx_desc[pi],
1690                                              rxring_numa[pi],
1691                                              &(port->rx_conf[qi]),
1692                                              mp);
1693                                 } else {
1694                                         struct rte_mempool *mp =
1695                                                 mbuf_pool_find(port->socket_id);
1696                                         if (mp == NULL) {
1697                                                 printf("Failed to setup RX queue:"
1698                                                         "No mempool allocation"
1699                                                         " on the socket %d\n",
1700                                                         port->socket_id);
1701                                                 return -1;
1702                                         }
1703                                         diag = rte_eth_rx_queue_setup(pi, qi,
1704                                              port->nb_rx_desc[pi],
1705                                              port->socket_id,
1706                                              &(port->rx_conf[qi]),
1707                                              mp);
1708                                 }
1709                                 if (diag == 0)
1710                                         continue;
1711
1712                                 /* Fail to setup rx queue, return */
1713                                 if (rte_atomic16_cmpset(&(port->port_status),
1714                                                         RTE_PORT_HANDLING,
1715                                                         RTE_PORT_STOPPED) == 0)
1716                                         printf("Port %d can not be set back "
1717                                                         "to stopped\n", pi);
1718                                 printf("Fail to configure port %d rx queues\n",
1719                                        pi);
1720                                 /* try to reconfigure queues next time */
1721                                 port->need_reconfig_queues = 1;
1722                                 return -1;
1723                         }
1724                 }
1725
1726                 /* start port */
1727                 if (rte_eth_dev_start(pi) < 0) {
1728                         printf("Fail to start port %d\n", pi);
1729
1730                         /* Fail to setup rx queue, return */
1731                         if (rte_atomic16_cmpset(&(port->port_status),
1732                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1733                                 printf("Port %d can not be set back to "
1734                                                         "stopped\n", pi);
1735                         continue;
1736                 }
1737
1738                 if (rte_atomic16_cmpset(&(port->port_status),
1739                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1740                         printf("Port %d can not be set into started\n", pi);
1741
1742                 rte_eth_macaddr_get(pi, &mac_addr);
1743                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1744                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1745                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1746                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1747
1748                 /* at least one port started, need checking link status */
1749                 need_check_link_status = 1;
1750         }
1751
1752         for (event_type = RTE_ETH_EVENT_UNKNOWN;
1753              event_type < RTE_ETH_EVENT_MAX;
1754              event_type++) {
1755                 diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1756                                                 event_type,
1757                                                 eth_event_callback,
1758                                                 NULL);
1759                 if (diag) {
1760                         printf("Failed to setup even callback for event %d\n",
1761                                 event_type);
1762                         return -1;
1763                 }
1764         }
1765
1766         if (need_check_link_status == 1 && !no_link_check)
1767                 check_all_ports_link_status(RTE_PORT_ALL);
1768         else if (need_check_link_status == 0)
1769                 printf("Please stop the ports first\n");
1770
1771         printf("Done\n");
1772         return 0;
1773 }
1774
/*
 * Stop all ports matching @pid, or every port when pid == RTE_PORT_ALL.
 *
 * Ports still present in the forwarding configuration while a test is
 * running, and bonding slaves, are skipped with a warning.  Each port is
 * moved STARTED -> HANDLING -> STOPPED through atomic compare-and-set so
 * that a concurrent state change is detected instead of clobbered.
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	/* Stopping ports while DCB test mode is active ends that test. */
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* Only ports currently in STARTED state are stopped;
		 * anything else (already stopped, handling) is left alone. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	/* Re-check link state once at least one port was actually stopped. */
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
1823
/*
 * Close all ports matching @pid, or every port when pid == RTE_PORT_ALL.
 *
 * A port must be stopped before it can be closed; forwarding ports,
 * bonding slaves and already-closed ports are skipped.  Flow rules are
 * flushed before rte_eth_dev_close().  State moves
 * STOPPED -> HANDLING -> CLOSED through atomic compare-and-set.
 */
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* cmpset(CLOSED, CLOSED) does not change state; it is used
		 * purely to test whether the port is already CLOSED. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		/* Drop any installed flow rules before the device is closed. */
		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
1873
1874 void
1875 reset_port(portid_t pid)
1876 {
1877         int diag;
1878         portid_t pi;
1879         struct rte_port *port;
1880
1881         if (port_id_is_invalid(pid, ENABLED_WARN))
1882                 return;
1883
1884         printf("Resetting ports...\n");
1885
1886         RTE_ETH_FOREACH_DEV(pi) {
1887                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1888                         continue;
1889
1890                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1891                         printf("Please remove port %d from forwarding "
1892                                "configuration.\n", pi);
1893                         continue;
1894                 }
1895
1896                 if (port_is_bonding_slave(pi)) {
1897                         printf("Please remove port %d from bonded device.\n",
1898                                pi);
1899                         continue;
1900                 }
1901
1902                 diag = rte_eth_dev_reset(pi);
1903                 if (diag == 0) {
1904                         port = &ports[pi];
1905                         port->need_reconfig = 1;
1906                         port->need_reconfig_queues = 1;
1907                 } else {
1908                         printf("Failed to reset port %d. diag=%d\n", pi, diag);
1909                 }
1910         }
1911
1912         printf("Done\n");
1913 }
1914
1915 static int
1916 eth_dev_event_callback_register(void)
1917 {
1918         int ret;
1919
1920         /* register the device event callback */
1921         ret = rte_dev_event_callback_register(NULL,
1922                 eth_dev_event_callback, NULL);
1923         if (ret) {
1924                 printf("Failed to register device event callback\n");
1925                 return -1;
1926         }
1927
1928         return 0;
1929 }
1930
1931
1932 static int
1933 eth_dev_event_callback_unregister(void)
1934 {
1935         int ret;
1936
1937         /* unregister the device event callback */
1938         ret = rte_dev_event_callback_unregister(NULL,
1939                 eth_dev_event_callback, NULL);
1940         if (ret < 0) {
1941                 printf("Failed to unregister device event callback\n");
1942                 return -1;
1943         }
1944
1945         return 0;
1946 }
1947
/*
 * Hot-plug a new port described by @identifier (a device argument
 * string such as a PCI address or vdev name).
 *
 * On success the new port is reconfigured for its NUMA socket (falling
 * back to socket 0 when the reported socket id is invalid), put in
 * promiscuous mode, and added to the forwarding setup in STOPPED state.
 */
void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	/* rte_eth_dev_attach() fills in the new port id on success. */
	if (rte_eth_dev_attach(identifier, &pi))
		return;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count_avail();

	ports[pi].port_status = RTE_PORT_STOPPED;

	/* Make the new port visible to the forwarding configuration. */
	update_fwd_ports(pi);

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
1980
/*
 * Detach (hot-unplug) @port_id from the application.
 *
 * The port must already be closed.  Its flow rules are flushed before
 * rte_eth_dev_detach() releases the underlying device; the global port
 * count and the forwarding configuration are refreshed afterwards.
 */
void
detach_port(portid_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (ports[port_id].flow_list)
		port_flow_flush(port_id);

	/* rte_eth_dev_detach() returns the device name it removed. */
	if (rte_eth_dev_detach(port_id, name)) {
		TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
		return;
	}

	nb_ports = rte_eth_dev_count_avail();

	/* Rebuild the forwarding port list without the detached port. */
	update_fwd_ports(RTE_MAX_ETHPORTS);

	printf("Port %u is detached. Now total ports is %d\n",
			port_id, nb_ports);
	printf("Done\n");
	return;
}
2010
/*
 * Application shutdown path: stop packet forwarding if still running,
 * then stop and close every port, and finally tear down the hot-plug
 * device event monitor when it was enabled.
 */
void
pmd_test_exit(void)
{
	portid_t pt_id;
	int ret;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		/* Link status is irrelevant during shutdown; skip the
		 * (slow) link check in stop_port(). */
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);
		}
	}

	if (hot_plug) {
		ret = rte_dev_event_monitor_stop();
		if (ret)
			RTE_LOG(ERR, EAL,
				"fail to stop device event monitor.");

		ret = eth_dev_event_callback_unregister();
		if (ret)
			RTE_LOG(ERR, EAL,
				"fail to unregister all event callbacks.");
	}

	printf("\nBye...\n");
}
2044
/* Signature of a test-menu command handler: no arguments, no result. */
typedef void (*cmd_func_t)(void);
/* One test-menu entry, binding a command name to its handler. */
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

/* Number of entries in the pmd_test_menu table (defined elsewhere). */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2052
2053 /* Check the link status of all ports in up to 9s, and print them finally */
2054 static void
2055 check_all_ports_link_status(uint32_t port_mask)
2056 {
2057 #define CHECK_INTERVAL 100 /* 100ms */
2058 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2059         portid_t portid;
2060         uint8_t count, all_ports_up, print_flag = 0;
2061         struct rte_eth_link link;
2062
2063         printf("Checking link statuses...\n");
2064         fflush(stdout);
2065         for (count = 0; count <= MAX_CHECK_TIME; count++) {
2066                 all_ports_up = 1;
2067                 RTE_ETH_FOREACH_DEV(portid) {
2068                         if ((port_mask & (1 << portid)) == 0)
2069                                 continue;
2070                         memset(&link, 0, sizeof(link));
2071                         rte_eth_link_get_nowait(portid, &link);
2072                         /* print link status if flag set */
2073                         if (print_flag == 1) {
2074                                 if (link.link_status)
2075                                         printf(
2076                                         "Port%d Link Up. speed %u Mbps- %s\n",
2077                                         portid, link.link_speed,
2078                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2079                                         ("full-duplex") : ("half-duplex\n"));
2080                                 else
2081                                         printf("Port %d Link Down\n", portid);
2082                                 continue;
2083                         }
2084                         /* clear all_ports_up flag if any link down */
2085                         if (link.link_status == ETH_LINK_DOWN) {
2086                                 all_ports_up = 0;
2087                                 break;
2088                         }
2089                 }
2090                 /* after finally printing all link status, get out */
2091                 if (print_flag == 1)
2092                         break;
2093
2094                 if (all_ports_up == 0) {
2095                         fflush(stdout);
2096                         rte_delay_ms(CHECK_INTERVAL);
2097                 }
2098
2099                 /* set the print_flag if all ports up or timeout */
2100                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2101                         print_flag = 1;
2102                 }
2103
2104                 if (lsc_interrupt)
2105                         break;
2106         }
2107 }
2108
/*
 * Deferred handler for a device-removal (RMV) interrupt, scheduled by
 * eth_event_callback() through rte_eal_alarm_set().
 *
 * Stops packet forwarding if the removed port was part of it, then
 * stops, closes and detaches the port, and restarts forwarding when it
 * had to be interrupted.
 */
static void
rmv_event_callback(void *arg)
{
	int need_to_start = 0;
	/* Checking link state on a removed device is pointless: disable
	 * it around stop_port() and restore the user's setting after. */
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;
	close_port(port_id);
	detach_port(port_id);
	if (need_to_start)
		start_packet_forwarding(0);
}
2130
2131 /* This function is used by the interrupt thread */
2132 static int
2133 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2134                   void *ret_param)
2135 {
2136         static const char * const event_desc[] = {
2137                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2138                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2139                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2140                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2141                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2142                 [RTE_ETH_EVENT_IPSEC] = "IPsec",
2143                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2144                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2145                 [RTE_ETH_EVENT_NEW] = "device probed",
2146                 [RTE_ETH_EVENT_DESTROY] = "device released",
2147                 [RTE_ETH_EVENT_MAX] = NULL,
2148         };
2149
2150         RTE_SET_USED(param);
2151         RTE_SET_USED(ret_param);
2152
2153         if (type >= RTE_ETH_EVENT_MAX) {
2154                 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
2155                         port_id, __func__, type);
2156                 fflush(stderr);
2157         } else if (event_print_mask & (UINT32_C(1) << type)) {
2158                 printf("\nPort %" PRIu8 ": %s event\n", port_id,
2159                         event_desc[type]);
2160                 fflush(stdout);
2161         }
2162
2163         if (port_id_is_invalid(port_id, DISABLED_WARN))
2164                 return 0;
2165
2166         switch (type) {
2167         case RTE_ETH_EVENT_INTR_RMV:
2168                 if (rte_eal_alarm_set(100000,
2169                                 rmv_event_callback, (void *)(intptr_t)port_id))
2170                         fprintf(stderr, "Could not set up deferred device removal\n");
2171                 break;
2172         default:
2173                 break;
2174         }
2175         return 0;
2176 }
2177
2178 /* This function is used by the interrupt thread */
2179 static void
2180 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2181                              __rte_unused void *arg)
2182 {
2183         if (type >= RTE_DEV_EVENT_MAX) {
2184                 fprintf(stderr, "%s called upon invalid event %d\n",
2185                         __func__, type);
2186                 fflush(stderr);
2187         }
2188
2189         switch (type) {
2190         case RTE_DEV_EVENT_REMOVE:
2191                 RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
2192                         device_name);
2193                 /* TODO: After finish failure handle, begin to stop
2194                  * packet forward, stop port, close port, detach port.
2195                  */
2196                 break;
2197         case RTE_DEV_EVENT_ADD:
2198                 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2199                         device_name);
2200                 /* TODO: After finish kernel driver binding,
2201                  * begin to attach port.
2202                  */
2203                 break;
2204         default:
2205                 break;
2206         }
2207 }
2208
2209 static int
2210 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2211 {
2212         uint16_t i;
2213         int diag;
2214         uint8_t mapping_found = 0;
2215
2216         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2217                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2218                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2219                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2220                                         tx_queue_stats_mappings[i].queue_id,
2221                                         tx_queue_stats_mappings[i].stats_counter_id);
2222                         if (diag != 0)
2223                                 return diag;
2224                         mapping_found = 1;
2225                 }
2226         }
2227         if (mapping_found)
2228                 port->tx_queue_stats_mapping_enabled = 1;
2229         return 0;
2230 }
2231
2232 static int
2233 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2234 {
2235         uint16_t i;
2236         int diag;
2237         uint8_t mapping_found = 0;
2238
2239         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2240                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2241                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2242                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2243                                         rx_queue_stats_mappings[i].queue_id,
2244                                         rx_queue_stats_mappings[i].stats_counter_id);
2245                         if (diag != 0)
2246                                 return diag;
2247                         mapping_found = 1;
2248                 }
2249         }
2250         if (mapping_found)
2251                 port->rx_queue_stats_mapping_enabled = 1;
2252         return 0;
2253 }
2254
2255 static void
2256 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2257 {
2258         int diag = 0;
2259
2260         diag = set_tx_queue_stats_mapping_registers(pi, port);
2261         if (diag != 0) {
2262                 if (diag == -ENOTSUP) {
2263                         port->tx_queue_stats_mapping_enabled = 0;
2264                         printf("TX queue stats mapping not supported port id=%d\n", pi);
2265                 }
2266                 else
2267                         rte_exit(EXIT_FAILURE,
2268                                         "set_tx_queue_stats_mapping_registers "
2269                                         "failed for port id=%d diag=%d\n",
2270                                         pi, diag);
2271         }
2272
2273         diag = set_rx_queue_stats_mapping_registers(pi, port);
2274         if (diag != 0) {
2275                 if (diag == -ENOTSUP) {
2276                         port->rx_queue_stats_mapping_enabled = 0;
2277                         printf("RX queue stats mapping not supported port id=%d\n", pi);
2278                 }
2279                 else
2280                         rte_exit(EXIT_FAILURE,
2281                                         "set_rx_queue_stats_mapping_registers "
2282                                         "failed for port id=%d diag=%d\n",
2283                                         pi, diag);
2284         }
2285 }
2286
2287 static void
2288 rxtx_port_config(struct rte_port *port)
2289 {
2290         uint16_t qid;
2291
2292         for (qid = 0; qid < nb_rxq; qid++) {
2293                 port->rx_conf[qid] = port->dev_info.default_rxconf;
2294
2295                 /* Check if any Rx parameters have been passed */
2296                 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2297                         port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2298
2299                 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2300                         port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2301
2302                 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2303                         port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2304
2305                 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2306                         port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2307
2308                 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2309                         port->rx_conf[qid].rx_drop_en = rx_drop_en;
2310
2311                 port->nb_rx_desc[qid] = nb_rxd;
2312         }
2313
2314         for (qid = 0; qid < nb_txq; qid++) {
2315                 port->tx_conf[qid] = port->dev_info.default_txconf;
2316
2317                 /* Check if any Tx parameters have been passed */
2318                 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2319                         port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2320
2321                 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2322                         port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2323
2324                 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2325                         port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2326
2327                 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2328                         port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2329
2330                 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2331                         port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2332
2333                 port->nb_tx_desc[qid] = nb_txd;
2334         }
2335 }
2336
/*
 * Initialize the software configuration of every known port:
 * flow-director config, RSS (only when more than one Rx queue is used,
 * and masked to what the device reports it supports), per-queue Rx/Tx
 * settings, MAC address, queue-stats mappings, and LSC/RMV interrupt
 * flags when both requested and supported by the device.
 */
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			rte_eth_dev_info_get(pid, &dev_info);
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			/* Only request RSS hash types the device supports. */
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & dev_info.flow_type_rss_offloads;
		} else {
			/* Single Rx queue: RSS is pointless, disable it. */
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		/* DCB chooses its own mq_mode; only set it here otherwise. */
		if (port->dcb_flag == 0) {
			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		/* Enable interrupts only when both requested by the user
		 * and advertised by the device. */
		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;

#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
		/* Detect softnic port */
		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
			port->softnic_enable = 1;
			memset(&port->softport, 0, sizeof(struct softnic_port));

			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
				port->softport.tm_flag = 1;
		}
#endif
	}
}
2394
2395 void set_port_slave_flag(portid_t slave_pid)
2396 {
2397         struct rte_port *port;
2398
2399         port = &ports[slave_pid];
2400         port->slave_flag = 1;
2401 }
2402
2403 void clear_port_slave_flag(portid_t slave_pid)
2404 {
2405         struct rte_port *port;
2406
2407         port = &ports[slave_pid];
2408         port->slave_flag = 0;
2409 }
2410
2411 uint8_t port_is_bonding_slave(portid_t slave_pid)
2412 {
2413         struct rte_port *port;
2414
2415         port = &ports[slave_pid];
2416         if ((rte_eth_devices[slave_pid].data->dev_flags &
2417             RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2418                 return 1;
2419         return 0;
2420 }
2421
/* VLAN IDs used to populate the VMDQ+DCB pool map in get_eth_dcb_conf(). */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
2428
/*
 * Fill @eth_conf with a DCB configuration.
 *
 * Two layouts are produced depending on @dcb_mode:
 *  - DCB_VT_ENABLED: VMDQ+DCB, mapping the vlan_tags[] table onto the
 *    pool set (32 pools for 4 TCs, 16 pools for 8 TCs);
 *  - otherwise: plain DCB with RSS on the RX side.
 *
 * @num_tcs selects 4 or 8 traffic classes; @pfc_en additionally enables
 * priority flow control in dcb_capability_en.
 * Always returns 0.
 */
static  int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		/* fewer TCs leave room for more pools: 4 TCs -> 32 pools,
		 * 8 TCs -> 16 pools
		 */
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		/* one VLAN filter entry per pool, taken from vlan_tags[] */
		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		/* spread the 8 user priorities round-robin over the TCs */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		/* spread the 8 user priorities round-robin over the TCs */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
		/* non-VT mode combines DCB with RSS, using the global
		 * rss_hf hash-function selection
		 */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	/* priority groups are always supported; PFC only on request */
	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
2495
2496 int
2497 init_port_dcb_config(portid_t pid,
2498                      enum dcb_mode_enable dcb_mode,
2499                      enum rte_eth_nb_tcs num_tcs,
2500                      uint8_t pfc_en)
2501 {
2502         struct rte_eth_conf port_conf;
2503         struct rte_port *rte_port;
2504         int retval;
2505         uint16_t i;
2506
2507         rte_port = &ports[pid];
2508
2509         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2510         /* Enter DCB configuration status */
2511         dcb_config = 1;
2512
2513         port_conf.rxmode = rte_port->dev_conf.rxmode;
2514         port_conf.txmode = rte_port->dev_conf.txmode;
2515
2516         /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2517         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2518         if (retval < 0)
2519                 return retval;
2520         port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2521
2522         /* re-configure the device . */
2523         rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
2524
2525         rte_eth_dev_info_get(pid, &rte_port->dev_info);
2526
2527         /* If dev_info.vmdq_pool_base is greater than 0,
2528          * the queue id of vmdq pools is started after pf queues.
2529          */
2530         if (dcb_mode == DCB_VT_ENABLED &&
2531             rte_port->dev_info.vmdq_pool_base > 0) {
2532                 printf("VMDQ_DCB multi-queue mode is nonsensical"
2533                         " for port %d.", pid);
2534                 return -1;
2535         }
2536
2537         /* Assume the ports in testpmd have the same dcb capability
2538          * and has the same number of rxq and txq in dcb mode
2539          */
2540         if (dcb_mode == DCB_VT_ENABLED) {
2541                 if (rte_port->dev_info.max_vfs > 0) {
2542                         nb_rxq = rte_port->dev_info.nb_rx_queues;
2543                         nb_txq = rte_port->dev_info.nb_tx_queues;
2544                 } else {
2545                         nb_rxq = rte_port->dev_info.max_rx_queues;
2546                         nb_txq = rte_port->dev_info.max_tx_queues;
2547                 }
2548         } else {
2549                 /*if vt is disabled, use all pf queues */
2550                 if (rte_port->dev_info.vmdq_pool_base == 0) {
2551                         nb_rxq = rte_port->dev_info.max_rx_queues;
2552                         nb_txq = rte_port->dev_info.max_tx_queues;
2553                 } else {
2554                         nb_rxq = (queueid_t)num_tcs;
2555                         nb_txq = (queueid_t)num_tcs;
2556
2557                 }
2558         }
2559         rx_free_thresh = 64;
2560
2561         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2562
2563         rxtx_port_config(rte_port);
2564         /* VLAN filter */
2565         rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2566         for (i = 0; i < RTE_DIM(vlan_tags); i++)
2567                 rx_vft_set(pid, vlan_tags[i], 1);
2568
2569         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2570         map_port_queue_stats_mapping_registers(pid, rte_port);
2571
2572         rte_port->dcb_flag = 1;
2573
2574         return 0;
2575 }
2576
2577 static void
2578 init_port(void)
2579 {
2580         /* Configuration of Ethernet ports. */
2581         ports = rte_zmalloc("testpmd: ports",
2582                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2583                             RTE_CACHE_LINE_SIZE);
2584         if (ports == NULL) {
2585                 rte_exit(EXIT_FAILURE,
2586                                 "rte_zmalloc(%d struct rte_port) failed\n",
2587                                 RTE_MAX_ETHPORTS);
2588         }
2589 }
2590
/*
 * Tear down testpmd from the signal handler: stop forwarding and close
 * all ports (pmd_test_exit), then terminate the interactive prompt
 * (prompt_exit). Order matters: ports are quiesced before the prompt
 * is released.
 */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
2597
2598 static void
2599 print_stats(void)
2600 {
2601         uint8_t i;
2602         const char clr[] = { 27, '[', '2', 'J', '\0' };
2603         const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2604
2605         /* Clear screen and move to top left */
2606         printf("%s%s", clr, top_left);
2607
2608         printf("\nPort statistics ====================================");
2609         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2610                 nic_stats_display(fwd_ports_ids[i]);
2611 }
2612
/*
 * SIGINT/SIGTERM handler: uninitialize optional subsystems, tear down
 * ports via force_quit(), set f_quit for the stats loop in main(), then
 * re-raise the signal with the default disposition so the process exits
 * with the conventional signal status.
 *
 * NOTE(review): printf(), rte_pdump_uninit() and force_quit() are not
 * async-signal-safe, so this handler presumably relies on the signal
 * arriving while the main loop is in a benign state — worth confirming
 * against the DPDK signal-handling guidance.
 */
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
2634
/*
 * testpmd entry point.
 *
 * Initialization order: install signal handlers, init EAL, register the
 * testpmd log type, init optional pdump, allocate port structures, set
 * the default forwarding config, parse testpmd's own arguments (those
 * after the EAL ones), validate option combinations, configure and
 * start all ports, then either run the interactive prompt or start
 * forwarding non-interactively until the user (or a signal) stops it.
 *
 * Returns 0 on normal exit, non-zero on failure paths that do not
 * already terminate via rte_exit()/rte_panic().
 */
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	int ret;

	/* install handlers before any subsystem starts, so ports are torn
	 * down cleanly on SIGINT/SIGTERM
	 */
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_panic("Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count_avail();
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_BSDAPP
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	/* rte_eal_init() consumed the first 'diag' arguments; the rest are
	 * testpmd's own options
	 */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	/* lock all pages to avoid page faults in the datapath; failure is
	 * non-fatal by design
	 */
	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				" using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();

	if (hot_plug) {
		/* enable hot plug monitoring */
		ret = rte_dev_event_monitor_start();
		if (ret) {
			rte_errno = EINVAL;
			return -1;
		}
		eth_dev_event_callback_register();

	}

	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	/* optionally replay a command file before entering the prompt */
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		/* non-interactive mode: optionally print stats every
		 * stats_period seconds until a signal sets f_quit
		 */
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}