app/test-pmd/testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_IXGBE_PMD
77 #include <rte_pmd_ixgbe.h>
78 #endif
79 #ifdef RTE_LIBRTE_PMD_XENVIRT
80 #include <rte_eth_xenvirt.h>
81 #endif
82 #ifdef RTE_LIBRTE_PDUMP
83 #include <rte_pdump.h>
84 #endif
85 #include <rte_flow.h>
86 #include <rte_metrics.h>
87 #ifdef RTE_LIBRTE_BITRATE
88 #include <rte_bitrate.h>
89 #endif
90 #ifdef RTE_LIBRTE_LATENCY_STATS
91 #include <rte_latencystats.h>
92 #endif
93
94 #include "testpmd.h"
95
96 uint16_t verbose_level = 0; /**< Silent by default. */
97
98 /* use master core for command line ? */
99 uint8_t interactive = 0;
100 uint8_t auto_start = 0;
101 char cmdline_filename[PATH_MAX] = {0};
102
103 /*
104  * NUMA support configuration.
105  * When set, the NUMA support attempts to dispatch the allocation of the
106  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
107  * probed ports among the CPU sockets 0 and 1.
108  * Otherwise, all memory is allocated from CPU socket 0.
109  */
110 uint8_t numa_support = 1; /**< numa enabled by default */
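/*
 * With NUMA support enabled, init_config() creates one mbuf pool per detected
 * CPU socket; otherwise a single pool is created on socket 0, or on the
 * socket selected with --socket-num.
 */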
111
112 /*
113  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
114  * not configured.
115  */
116 uint8_t socket_num = UMA_NO_CONFIG;
117
118 /*
119  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
120  */
121 uint8_t mp_anon = 0;
122
123 /*
124  * Record the Ethernet address of peer target ports to which packets are
125  * forwarded.
126  * Must be instantiated with the ethernet addresses of peer traffic generator
127  * ports.
128  */
129 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
130 portid_t nb_peer_eth_addrs = 0;
131
132 /*
133  * Probed Target Environment.
134  */
135 struct rte_port *ports;        /**< For all probed ethernet ports. */
136 portid_t nb_ports;             /**< Number of probed ethernet ports. */
137 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
138 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
139
140 /*
141  * Test Forwarding Configuration.
142  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
143  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
144  */
145 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
146 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
147 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
148 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
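/*
 * Illustrative example of the invariants above: with 8 enabled forwarding
 * lcores, nb_lcores is 8; restricting the core list at run time (e.g. with
 * the "set corelist" command) lowers nb_cfg_lcores, and "set nb-cores"
 * selects how many of those actually forward, so that
 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores always holds.
 * The port counters follow the same pattern.
 */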
149
150 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
151 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
152
153 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
154 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
155
156 /*
157  * Forwarding engines.
158  */
159 struct fwd_engine * fwd_engines[] = {
160         &io_fwd_engine,
161         &mac_fwd_engine,
162         &mac_swap_engine,
163         &flow_gen_engine,
164         &rx_only_engine,
165         &tx_only_engine,
166         &csum_fwd_engine,
167         &icmp_echo_engine,
168 #ifdef RTE_LIBRTE_IEEE1588
169         &ieee1588_fwd_engine,
170 #endif
171         NULL,
172 };
173
174 struct fwd_config cur_fwd_config;
175 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
176 uint32_t retry_enabled;
177 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
178 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
179
180 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
181 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
182                                       * specified on command-line. */
183
184 /*
185  * Configuration of packet segments used by the "txonly" processing engine.
186  */
187 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
188 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
189         TXONLY_DEF_PACKET_LEN,
190 };
191 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
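/*
 * Illustrative example (values are arbitrary): a TXONLY packet made of three
 * 64-byte segments, e.g. as configured at run time with "set txpkts 64,64,64",
 * would correspond to:
 *
 *     tx_pkt_nb_segs = 3;
 *     tx_pkt_seg_lengths[0] = tx_pkt_seg_lengths[1] = tx_pkt_seg_lengths[2] = 64;
 *     tx_pkt_length = 192;            (sum of the segment lengths)
 */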
192
193 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
194 /**< Split policy for packets to TX. */
195
196 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
197 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
198
199 /* Whether the current configuration is in DCB mode or not; 0 means not in DCB mode. */
200 uint8_t dcb_config = 0;
201
202 /* Whether DCB is in testing status. */
203 uint8_t dcb_test = 0;
204
205 /*
206  * Configurable number of RX/TX queues.
207  */
208 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
209 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
210
211 /*
212  * Configurable number of RX/TX ring descriptors.
213  */
214 #define RTE_TEST_RX_DESC_DEFAULT 128
215 #define RTE_TEST_TX_DESC_DEFAULT 512
216 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
217 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
218
219 #define RTE_PMD_PARAM_UNSET -1
220 /*
221  * Configurable values of RX and TX ring threshold registers.
222  */
223
224 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
225 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
226 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
227
228 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
229 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
230 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
231
232 /*
233  * Configurable value of RX free threshold.
234  */
235 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
236
237 /*
238  * Configurable value of RX drop enable.
239  */
240 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
241
242 /*
243  * Configurable value of TX free threshold.
244  */
245 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
246
247 /*
248  * Configurable value of TX RS bit threshold.
249  */
250 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
251
252 /*
253  * Configurable value of TX queue flags.
254  */
255 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
256
257 /*
258  * Receive Side Scaling (RSS) configuration.
259  */
260 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
261
262 /*
263  * Port topology configuration
264  */
265 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
266
267 /*
268  * Avoid flushing all the RX queues before starting forwarding.
269  */
270 uint8_t no_flush_rx = 0; /* flush by default */
271
272 /*
273  * Avoid checking the link status when starting/stopping a port.
274  */
275 uint8_t no_link_check = 0; /* check by default */
276
277 /*
278  * Enable link status change notification
279  */
280 uint8_t lsc_interrupt = 1; /* enabled by default */
281
282 /*
283  * Enable device removal notification.
284  */
285 uint8_t rmv_interrupt = 1; /* enabled by default */
286
287 /*
288  * Display or mask ether events
289  * Default to all events except VF_MBOX
290  */
291 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
292                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
293                             (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
294                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
295                             (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
296                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
297
298 /*
299  * NIC bypass mode configuration options.
300  */
301
302 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
303 /* The NIC bypass watchdog timeout. */
304 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
305 #endif
306
307
308 #ifdef RTE_LIBRTE_LATENCY_STATS
309
310 /*
311  * Set when latency stats are enabled on the command line.
312  */
313 uint8_t latencystats_enabled;
314
315 /*
316  * Lcore ID that serves latency statistics.
317  */
318 lcoreid_t latencystats_lcore_id = -1;
319
320 #endif
321
322 /*
323  * Ethernet device configuration.
324  */
325 struct rte_eth_rxmode rx_mode = {
326         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
327         .split_hdr_size = 0,
328         .header_split   = 0, /**< Header Split disabled. */
329         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
330         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
331         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
332         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
333         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
334         .hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
335 };
336
337 struct rte_fdir_conf fdir_conf = {
338         .mode = RTE_FDIR_MODE_NONE,
339         .pballoc = RTE_FDIR_PBALLOC_64K,
340         .status = RTE_FDIR_REPORT_STATUS,
341         .mask = {
342                 .vlan_tci_mask = 0x0,
343                 .ipv4_mask     = {
344                         .src_ip = 0xFFFFFFFF,
345                         .dst_ip = 0xFFFFFFFF,
346                 },
347                 .ipv6_mask     = {
348                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
349                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
350                 },
351                 .src_port_mask = 0xFFFF,
352                 .dst_port_mask = 0xFFFF,
353                 .mac_addr_byte_mask = 0xFF,
354                 .tunnel_type_mask = 1,
355                 .tunnel_id_mask = 0xFFFFFFFF,
356         },
357         .drop_queue = 127,
358 };
359
360 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
361
362 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
363 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
364
365 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
366 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
367
368 uint16_t nb_tx_queue_stats_mappings = 0;
369 uint16_t nb_rx_queue_stats_mappings = 0;
370
371 unsigned int num_sockets = 0;
372 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
373
374 #ifdef RTE_LIBRTE_BITRATE
375 /* Bitrate statistics */
376 struct rte_stats_bitrates *bitrate_data;
377 lcoreid_t bitrate_lcore_id;
378 uint8_t bitrate_enabled;
379 #endif
380
381 /* Forward function declarations */
382 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
383 static void check_all_ports_link_status(uint32_t port_mask);
384 static void eth_event_callback(uint8_t port_id,
385                                enum rte_eth_event_type type,
386                                void *param);
387
388 /*
389  * Check if all the ports are started.
390  * If yes, return positive value. If not, return zero.
391  */
392 static int all_ports_started(void);
393
394 /*
395  * Helper function to check if socket is already discovered.
396  * If yes, return positive value. If not, return zero.
397  */
398 int
399 new_socket_id(unsigned int socket_id)
400 {
401         unsigned int i;
402
403         for (i = 0; i < num_sockets; i++) {
404                 if (socket_ids[i] == socket_id)
405                         return 0;
406         }
407         return 1;
408 }
409
410 /*
411  * Setup default configuration.
412  */
413 static void
414 set_default_fwd_lcores_config(void)
415 {
416         unsigned int i;
417         unsigned int nb_lc;
418         unsigned int sock_num;
419
420         nb_lc = 0;
421         for (i = 0; i < RTE_MAX_LCORE; i++) {
422                 sock_num = rte_lcore_to_socket_id(i);
423                 if (new_socket_id(sock_num)) {
424                         if (num_sockets >= RTE_MAX_NUMA_NODES) {
425                                 rte_exit(EXIT_FAILURE,
426                                          "Total sockets greater than %u\n",
427                                          RTE_MAX_NUMA_NODES);
428                         }
429                         socket_ids[num_sockets++] = sock_num;
430                 }
431                 if (!rte_lcore_is_enabled(i))
432                         continue;
433                 if (i == rte_get_master_lcore())
434                         continue;
435                 fwd_lcores_cpuids[nb_lc++] = i;
436         }
437         nb_lcores = (lcoreid_t) nb_lc;
438         nb_cfg_lcores = nb_lcores;
439         nb_fwd_lcores = 1;
440 }
441
442 static void
443 set_def_peer_eth_addrs(void)
444 {
445         portid_t i;
446
447         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
448                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
449                 peer_eth_addrs[i].addr_bytes[5] = i;
450         }
451 }
452
453 static void
454 set_default_fwd_ports_config(void)
455 {
456         portid_t pt_id;
457
458         for (pt_id = 0; pt_id < nb_ports; pt_id++)
459                 fwd_ports_ids[pt_id] = pt_id;
460
461         nb_cfg_ports = nb_ports;
462         nb_fwd_ports = nb_ports;
463 }
464
465 void
466 set_def_fwd_config(void)
467 {
468         set_default_fwd_lcores_config();
469         set_def_peer_eth_addrs();
470         set_default_fwd_ports_config();
471 }
472
473 /*
474  * Create an mbuf pool on a given socket. Done once at init time.
475  */
476 static void
477 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
478                  unsigned int socket_id)
479 {
480         char pool_name[RTE_MEMPOOL_NAMESIZE];
481         struct rte_mempool *rte_mp = NULL;
482         uint32_t mb_size;
483
484         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
485         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
486
487         RTE_LOG(INFO, USER1,
488                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
489                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
490
491 #ifdef RTE_LIBRTE_PMD_XENVIRT
492         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
493                 (unsigned) mb_mempool_cache,
494                 sizeof(struct rte_pktmbuf_pool_private),
495                 rte_pktmbuf_pool_init, NULL,
496                 rte_pktmbuf_init, NULL,
497                 socket_id, 0);
498 #endif
499
500         /* If the Xen allocation above failed or was skipped, fall back to normal allocation. */
501         if (rte_mp == NULL) {
502                 if (mp_anon != 0) {
503                         rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
504                                 mb_size, (unsigned) mb_mempool_cache,
505                                 sizeof(struct rte_pktmbuf_pool_private),
506                                 socket_id, 0);
507                         if (rte_mp == NULL)
508                                 goto err;
509
510                         if (rte_mempool_populate_anon(rte_mp) == 0) {
511                                 rte_mempool_free(rte_mp);
512                                 rte_mp = NULL;
513                                 goto err;
514                         }
515                         rte_pktmbuf_pool_init(rte_mp, NULL);
516                         rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
517                 } else {
518                         /* wrapper to rte_mempool_create() */
519                         rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
520                                 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
521                 }
522         }
523
524 err:
525         if (rte_mp == NULL) {
526                 rte_exit(EXIT_FAILURE,
527                         "Creation of mbuf pool for socket %u failed: %s\n",
528                         socket_id, rte_strerror(rte_errno));
529         } else if (verbose_level > 0) {
530                 rte_mempool_dump(stdout, rte_mp);
531         }
532 }
533
534 /*
535  * Check whether a given socket ID is valid in NUMA mode;
536  * return 0 if valid, -1 otherwise.
537  */
538 static int
539 check_socket_id(const unsigned int socket_id)
540 {
541         static int warning_once = 0;
542
543         if (new_socket_id(socket_id)) {
544                 if (!warning_once && numa_support)
545                         printf("Warning: NUMA should be configured manually by"
546                                " using --port-numa-config and"
547                                " --ring-numa-config parameters along with"
548                                " --numa.\n");
549                 warning_once = 1;
550                 return -1;
551         }
552         return 0;
553 }
554
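/*
 * One-time initialisation of the forwarding configuration: allocate the
 * per-lcore contexts, retrieve the device info of each probed port, create
 * the mbuf pool(s), assign an mbuf pool to each forwarding lcore and set up
 * the forwarding streams.
 */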
555 static void
556 init_config(void)
557 {
558         portid_t pid;
559         struct rte_port *port;
560         struct rte_mempool *mbp;
561         unsigned int nb_mbuf_per_pool;
562         lcoreid_t  lc_id;
563         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
564
565         memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
566
567         if (numa_support) {
568                 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
569                 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
570                 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
571         }
572
573         /* Configuration of logical cores. */
574         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
575                                 sizeof(struct fwd_lcore *) * nb_lcores,
576                                 RTE_CACHE_LINE_SIZE);
577         if (fwd_lcores == NULL) {
578                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
579                                                         "failed\n", nb_lcores);
580         }
581         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
582                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
583                                                sizeof(struct fwd_lcore),
584                                                RTE_CACHE_LINE_SIZE);
585                 if (fwd_lcores[lc_id] == NULL) {
586                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
587                                                                 "failed\n");
588                 }
589                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
590         }
591
592         RTE_ETH_FOREACH_DEV(pid) {
593                 port = &ports[pid];
594                 rte_eth_dev_info_get(pid, &port->dev_info);
595
596                 if (numa_support) {
597                         if (port_numa[pid] != NUMA_NO_CONFIG)
598                                 port_per_socket[port_numa[pid]]++;
599                         else {
600                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
601
602                                 /* if socket_id is invalid, set to 0 */
603                                 if (check_socket_id(socket_id) < 0)
604                                         socket_id = 0;
605                                 port_per_socket[socket_id]++;
606                         }
607                 }
608
609                 /* set flag to initialize port/queue */
610                 port->need_reconfig = 1;
611                 port->need_reconfig_queues = 1;
612         }
613
614         /*
615          * Create pools of mbuf.
616          * If NUMA support is disabled, create a single pool of mbuf in
617          * socket 0 memory by default.
618          * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
619          *
620          * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
621          * nb_txd can be configured at run time.
622          */
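        /*
         * Default sizing used below when the total number of mbufs is not
         * forced on the command line: per pool,
         *
         *     nb_mbuf_per_pool = (RTE_TEST_RX_DESC_MAX
         *                         + nb_lcores * mb_mempool_cache
         *                         + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST)
         *                        * RTE_MAX_ETHPORTS;
         *
         * i.e. enough mbufs for the largest supported RX and TX rings, the
         * per-lcore mempool caches and one burst, for every possible port.
         */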
623         if (param_total_num_mbufs)
624                 nb_mbuf_per_pool = param_total_num_mbufs;
625         else {
626                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
627                         (nb_lcores * mb_mempool_cache) +
628                         RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
629                 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
630         }
631
632         if (numa_support) {
633                 uint8_t i;
634
635                 for (i = 0; i < num_sockets; i++)
636                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
637                                          socket_ids[i]);
638         } else {
639                 if (socket_num == UMA_NO_CONFIG)
640                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
641                 else
642                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
643                                                  socket_num);
644         }
645
646         init_port_config();
647
648         /*
649          * Record which mbuf pool each logical core should use, if needed.
650          */
651         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
652                 mbp = mbuf_pool_find(
653                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
654
655                 if (mbp == NULL)
656                         mbp = mbuf_pool_find(0);
657                 fwd_lcores[lc_id]->mbp = mbp;
658         }
659
660         /* Configuration of packet forwarding streams. */
661         if (init_fwd_streams() < 0)
662                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
663
664         fwd_config_setup();
665 }
666
667
668 void
669 reconfig(portid_t new_port_id, unsigned socket_id)
670 {
671         struct rte_port *port;
672
673         /* Reconfiguration of Ethernet ports. */
674         port = &ports[new_port_id];
675         rte_eth_dev_info_get(new_port_id, &port->dev_info);
676
677         /* set flag to initialize port/queue */
678         port->need_reconfig = 1;
679         port->need_reconfig_queues = 1;
680         port->socket_id = socket_id;
681
682         init_port_config();
683 }
684
685
686 int
687 init_fwd_streams(void)
688 {
689         portid_t pid;
690         struct rte_port *port;
691         streamid_t sm_id, nb_fwd_streams_new;
692         queueid_t q;
693
694         /* Set each port's socket ID according to whether NUMA is enabled. */
695         RTE_ETH_FOREACH_DEV(pid) {
696                 port = &ports[pid];
697                 if (nb_rxq > port->dev_info.max_rx_queues) {
698                         printf("Fail: nb_rxq(%d) is greater than "
699                                 "max_rx_queues(%d)\n", nb_rxq,
700                                 port->dev_info.max_rx_queues);
701                         return -1;
702                 }
703                 if (nb_txq > port->dev_info.max_tx_queues) {
704                         printf("Fail: nb_txq(%d) is greater than "
705                                 "max_tx_queues(%d)\n", nb_txq,
706                                 port->dev_info.max_tx_queues);
707                         return -1;
708                 }
709                 if (numa_support) {
710                         if (port_numa[pid] != NUMA_NO_CONFIG)
711                                 port->socket_id = port_numa[pid];
712                         else {
713                                 port->socket_id = rte_eth_dev_socket_id(pid);
714
715                                 /* if socket_id is invalid, set to 0 */
716                                 if (check_socket_id(port->socket_id) < 0)
717                                         port->socket_id = 0;
718                         }
719                 }
720                 else {
721                         if (socket_num == UMA_NO_CONFIG)
722                                 port->socket_id = 0;
723                         else
724                                 port->socket_id = socket_num;
725                 }
726         }
727
728         q = RTE_MAX(nb_rxq, nb_txq);
729         if (q == 0) {
730                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
731                 return -1;
732         }
733         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
734         if (nb_fwd_streams_new == nb_fwd_streams)
735                 return 0;
736         /* clear the old */
737         if (fwd_streams != NULL) {
738                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
739                         if (fwd_streams[sm_id] == NULL)
740                                 continue;
741                         rte_free(fwd_streams[sm_id]);
742                         fwd_streams[sm_id] = NULL;
743                 }
744                 rte_free(fwd_streams);
745                 fwd_streams = NULL;
746         }
747
748         /* init new */
749         nb_fwd_streams = nb_fwd_streams_new;
750         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
751                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
752         if (fwd_streams == NULL)
753                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
754                                                 "failed\n", nb_fwd_streams);
755
756         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
757                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
758                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
759                 if (fwd_streams[sm_id] == NULL)
760                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
761                                                                 " failed\n");
762         }
763
764         return 0;
765 }
766
767 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
768 static void
769 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
770 {
771         unsigned int total_burst;
772         unsigned int nb_burst;
773         unsigned int burst_stats[3];
774         uint16_t pktnb_stats[3];
775         uint16_t nb_pkt;
776         int burst_percent[3];
777
778         /*
779          * First compute the total number of packet bursts and the
780          * two highest numbers of bursts of the same number of packets.
781          */
782         total_burst = 0;
783         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
784         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
785         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
786                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
787                 if (nb_burst == 0)
788                         continue;
789                 total_burst += nb_burst;
790                 if (nb_burst > burst_stats[0]) {
791                         burst_stats[1] = burst_stats[0];
792                         pktnb_stats[1] = pktnb_stats[0];
793                         burst_stats[0] = nb_burst;
794                         pktnb_stats[0] = nb_pkt;
795                 } else if (nb_burst > burst_stats[1]) {
                            /* Also track the second-highest burst count. */
                            burst_stats[1] = nb_burst;
                            pktnb_stats[1] = nb_pkt;
                    }
796         }
797         if (total_burst == 0)
798                 return;
799         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
800         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
801                burst_percent[0], (int) pktnb_stats[0]);
802         if (burst_stats[0] == total_burst) {
803                 printf("]\n");
804                 return;
805         }
806         if (burst_stats[0] + burst_stats[1] == total_burst) {
807                 printf(" + %d%% of %d pkts]\n",
808                        100 - burst_percent[0], pktnb_stats[1]);
809                 return;
810         }
811         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
812         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
813         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
814                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
815                 return;
816         }
817         printf(" + %d%% of %d pkts + %d%% of others]\n",
818                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
819 }
820 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
821
822 static void
823 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
824 {
825         struct rte_port *port;
826         uint8_t i;
827
828         static const char *fwd_stats_border = "----------------------";
829
830         port = &ports[port_id];
831         printf("\n  %s Forward statistics for port %-2d %s\n",
832                fwd_stats_border, port_id, fwd_stats_border);
833
834         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
835                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
836                        "%-"PRIu64"\n",
837                        stats->ipackets, stats->imissed,
838                        (uint64_t) (stats->ipackets + stats->imissed));
839
840                 if (cur_fwd_eng == &csum_fwd_engine)
841                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
842                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
843                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
844                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
845                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
846                 }
847
848                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
849                        "%-"PRIu64"\n",
850                        stats->opackets, port->tx_dropped,
851                        (uint64_t) (stats->opackets + port->tx_dropped));
852         }
853         else {
854                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
855                        "%14"PRIu64"\n",
856                        stats->ipackets, stats->imissed,
857                        (uint64_t) (stats->ipackets + stats->imissed));
858
859                 if (cur_fwd_eng == &csum_fwd_engine)
860                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
861                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
862                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
863                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
864                         printf("  RX-nombufs:             %14"PRIu64"\n",
865                                stats->rx_nombuf);
866                 }
867
868                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
869                        "%14"PRIu64"\n",
870                        stats->opackets, port->tx_dropped,
871                        (uint64_t) (stats->opackets + port->tx_dropped));
872         }
873
874 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
875         if (port->rx_stream)
876                 pkt_burst_stats_display("RX",
877                         &port->rx_stream->rx_burst_stats);
878         if (port->tx_stream)
879                 pkt_burst_stats_display("TX",
880                         &port->tx_stream->tx_burst_stats);
881 #endif
882
883         if (port->rx_queue_stats_mapping_enabled) {
884                 printf("\n");
885                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
886                         printf("  Stats reg %2d RX-packets:%14"PRIu64
887                                "     RX-errors:%14"PRIu64
888                                "    RX-bytes:%14"PRIu64"\n",
889                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
890                 }
891                 printf("\n");
892         }
893         if (port->tx_queue_stats_mapping_enabled) {
894                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
895                         printf("  Stats reg %2d TX-packets:%14"PRIu64
896                                "                                 TX-bytes:%14"PRIu64"\n",
897                                i, stats->q_opackets[i], stats->q_obytes[i]);
898                 }
899         }
900
901         printf("  %s--------------------------------%s\n",
902                fwd_stats_border, fwd_stats_border);
903 }
904
905 static void
906 fwd_stream_stats_display(streamid_t stream_id)
907 {
908         struct fwd_stream *fs;
909         static const char *fwd_top_stats_border = "-------";
910
911         fs = fwd_streams[stream_id];
912         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
913             (fs->fwd_dropped == 0))
914                 return;
915         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
916                "TX Port=%2d/Queue=%2d %s\n",
917                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
918                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
919         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
920                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
921
922         /* if checksum mode */
923         if (cur_fwd_eng == &csum_fwd_engine) {
924                 printf("  RX-bad-IP-checksum: %-14u  RX-bad-L4-checksum: "
925                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
926         }
927
928 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
929         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
930         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
931 #endif
932 }
933
934 static void
935 flush_fwd_rx_queues(void)
936 {
937         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
938         portid_t  rxp;
939         portid_t port_id;
940         queueid_t rxq;
941         uint16_t  nb_rx;
942         uint16_t  i;
943         uint8_t   j;
944         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
945         uint64_t timer_period;
946
947         /* convert to number of cycles */
948         timer_period = rte_get_timer_hz(); /* 1 second timeout */
949
950         for (j = 0; j < 2; j++) {
951                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
952                         for (rxq = 0; rxq < nb_rxq; rxq++) {
953                                 port_id = fwd_ports_ids[rxp];
954                                 /*
955                                  * testpmd can get stuck in the do-while loop
956                                  * below if rte_eth_rx_burst() always returns
957                                  * nonzero packets, so a timer is used to exit
958                                  * the loop after a one-second timeout.
959                                  */
960                                 prev_tsc = rte_rdtsc();
961                                 do {
962                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
963                                                 pkts_burst, MAX_PKT_BURST);
964                                         for (i = 0; i < nb_rx; i++)
965                                                 rte_pktmbuf_free(pkts_burst[i]);
966
967                                         cur_tsc = rte_rdtsc();
968                                         diff_tsc = cur_tsc - prev_tsc;
969                                         timer_tsc += diff_tsc;
970                                 } while ((nb_rx > 0) &&
971                                         (timer_tsc < timer_period));
972                                 timer_tsc = 0;
973                         }
974                 }
975                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
976         }
977 }
978
979 static void
980 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
981 {
982         struct fwd_stream **fsm;
983         streamid_t nb_fs;
984         streamid_t sm_id;
985 #ifdef RTE_LIBRTE_BITRATE
986         uint64_t tics_per_1sec;
987         uint64_t tics_datum;
988         uint64_t tics_current;
989         uint8_t idx_port, cnt_ports;
990
991         cnt_ports = rte_eth_dev_count();
992         tics_datum = rte_rdtsc();
993         tics_per_1sec = rte_get_timer_hz();
994 #endif
995         fsm = &fwd_streams[fc->stream_idx];
996         nb_fs = fc->stream_nb;
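        /*
         * Main forwarding loop of this lcore: invoke the engine's packet_fwd
         * callback on every stream assigned to the lcore until fc->stopped is
         * set, refreshing the bitrate statistics once per second and the
         * latency statistics on the lcores selected for them, when enabled.
         */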
997         do {
998                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
999                         (*pkt_fwd)(fsm[sm_id]);
1000 #ifdef RTE_LIBRTE_BITRATE
1001                 if (bitrate_enabled != 0 &&
1002                                 bitrate_lcore_id == rte_lcore_id()) {
1003                         tics_current = rte_rdtsc();
1004                         if (tics_current - tics_datum >= tics_per_1sec) {
1005                                 /* Periodic bitrate calculation */
1006                                 for (idx_port = 0;
1007                                                 idx_port < cnt_ports;
1008                                                 idx_port++)
1009                                         rte_stats_bitrate_calc(bitrate_data,
1010                                                 idx_port);
1011                                 tics_datum = tics_current;
1012                         }
1013                 }
1014 #endif
1015 #ifdef RTE_LIBRTE_LATENCY_STATS
1016                 if (latencystats_enabled != 0 &&
1017                                 latencystats_lcore_id == rte_lcore_id())
1018                         rte_latencystats_update();
1019 #endif
1020
1021         } while (! fc->stopped);
1022 }
1023
1024 static int
1025 start_pkt_forward_on_core(void *fwd_arg)
1026 {
1027         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1028                              cur_fwd_config.fwd_eng->packet_fwd);
1029         return 0;
1030 }
1031
1032 /*
1033  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1034  * Used to start communication flows in network loopback test configurations.
1035  */
1036 static int
1037 run_one_txonly_burst_on_core(void *fwd_arg)
1038 {
1039         struct fwd_lcore *fwd_lc;
1040         struct fwd_lcore tmp_lcore;
1041
1042         fwd_lc = (struct fwd_lcore *) fwd_arg;
1043         tmp_lcore = *fwd_lc;
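        /*
         * Mark the local copy as stopped so that the do-while loop in
         * run_pkt_fwd_on_lcore() executes exactly one pass, i.e. a single
         * burst per stream.
         */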
1044         tmp_lcore.stopped = 1;
1045         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1046         return 0;
1047 }
1048
1049 /*
1050  * Launch packet forwarding:
1051  *     - Setup per-port forwarding context.
1052  *     - launch logical cores with their forwarding configuration.
1053  */
1054 static void
1055 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1056 {
1057         port_fwd_begin_t port_fwd_begin;
1058         unsigned int i;
1059         unsigned int lc_id;
1060         int diag;
1061
1062         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1063         if (port_fwd_begin != NULL) {
1064                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1065                         (*port_fwd_begin)(fwd_ports_ids[i]);
1066         }
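        /*
         * Launch the forwarding function on every forwarding lcore. In
         * interactive mode, the lcore running the command line (the current
         * lcore) is skipped so that it keeps servicing the CLI.
         */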
1067         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1068                 lc_id = fwd_lcores_cpuids[i];
1069                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1070                         fwd_lcores[i]->stopped = 0;
1071                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1072                                                      fwd_lcores[i], lc_id);
1073                         if (diag != 0)
1074                                 printf("launch lcore %u failed - diag=%d\n",
1075                                        lc_id, diag);
1076                 }
1077         }
1078 }
1079
1080 /*
1081  * Launch packet forwarding configuration.
1082  */
1083 void
1084 start_packet_forwarding(int with_tx_first)
1085 {
1086         port_fwd_begin_t port_fwd_begin;
1087         port_fwd_end_t  port_fwd_end;
1088         struct rte_port *port;
1089         unsigned int i;
1090         portid_t   pt_id;
1091         streamid_t sm_id;
1092
1093         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1094                 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1095
1096         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1097                 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1098
1099         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1100                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1101                 (!nb_rxq || !nb_txq))
1102                 rte_exit(EXIT_FAILURE,
1103                         "Either rxq or txq are 0, cannot use %s fwd mode\n",
1104                         cur_fwd_eng->fwd_mode_name);
1105
1106         if (all_ports_started() == 0) {
1107                 printf("Not all ports were started\n");
1108                 return;
1109         }
1110         if (test_done == 0) {
1111                 printf("Packet forwarding already started\n");
1112                 return;
1113         }
1114
1115         if (init_fwd_streams() < 0) {
1116                 printf("Fail from init_fwd_streams()\n");
1117                 return;
1118         }
1119
1120         if(dcb_test) {
1121                 for (i = 0; i < nb_fwd_ports; i++) {
1122                         pt_id = fwd_ports_ids[i];
1123                         port = &ports[pt_id];
1124                         if (!port->dcb_flag) {
1125                                 printf("In DCB mode, all forwarding ports must "
1126                                        "be configured in this mode.\n");
1127                                 return;
1128                         }
1129                 }
1130                 if (nb_fwd_lcores == 1) {
1131                         printf("In DCB mode, the number of forwarding cores "
1132                                "should be larger than 1.\n");
1133                         return;
1134                 }
1135         }
1136         test_done = 0;
1137
1138         if(!no_flush_rx)
1139                 flush_fwd_rx_queues();
1140
1141         fwd_config_setup();
1142         pkt_fwd_config_display(&cur_fwd_config);
1143         rxtx_config_display();
1144
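        /*
         * Snapshot the current port statistics and reset the software
         * counters, so that stop_packet_forwarding() reports only the traffic
         * of this forwarding run.
         */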
1145         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1146                 pt_id = fwd_ports_ids[i];
1147                 port = &ports[pt_id];
1148                 rte_eth_stats_get(pt_id, &port->stats);
1149                 port->tx_dropped = 0;
1150
1151                 map_port_queue_stats_mapping_registers(pt_id, port);
1152         }
1153         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1154                 fwd_streams[sm_id]->rx_packets = 0;
1155                 fwd_streams[sm_id]->tx_packets = 0;
1156                 fwd_streams[sm_id]->fwd_dropped = 0;
1157                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1158                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1159
1160 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1161                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1162                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1163                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1164                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1165 #endif
1166 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1167                 fwd_streams[sm_id]->core_cycles = 0;
1168 #endif
1169         }
1170         if (with_tx_first) {
1171                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1172                 if (port_fwd_begin != NULL) {
1173                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1174                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1175                 }
1176                 while (with_tx_first--) {
1177                         launch_packet_forwarding(
1178                                         run_one_txonly_burst_on_core);
1179                         rte_eal_mp_wait_lcore();
1180                 }
1181                 port_fwd_end = tx_only_engine.port_fwd_end;
1182                 if (port_fwd_end != NULL) {
1183                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1184                                 (*port_fwd_end)(fwd_ports_ids[i]);
1185                 }
1186         }
1187         launch_packet_forwarding(start_pkt_forward_on_core);
1188 }
1189
1190 void
1191 stop_packet_forwarding(void)
1192 {
1193         struct rte_eth_stats stats;
1194         struct rte_port *port;
1195         port_fwd_end_t  port_fwd_end;
1196         int i;
1197         portid_t   pt_id;
1198         streamid_t sm_id;
1199         lcoreid_t  lc_id;
1200         uint64_t total_recv;
1201         uint64_t total_xmit;
1202         uint64_t total_rx_dropped;
1203         uint64_t total_tx_dropped;
1204         uint64_t total_rx_nombuf;
1205         uint64_t tx_dropped;
1206         uint64_t rx_bad_ip_csum;
1207         uint64_t rx_bad_l4_csum;
1208 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1209         uint64_t fwd_cycles;
1210 #endif
1211         static const char *acc_stats_border = "+++++++++++++++";
1212
1213         if (test_done) {
1214                 printf("Packet forwarding not started\n");
1215                 return;
1216         }
1217         printf("Telling cores to stop...");
1218         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1219                 fwd_lcores[lc_id]->stopped = 1;
1220         printf("\nWaiting for lcores to finish...\n");
1221         rte_eal_mp_wait_lcore();
1222         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1223         if (port_fwd_end != NULL) {
1224                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1225                         pt_id = fwd_ports_ids[i];
1226                         (*port_fwd_end)(pt_id);
1227                 }
1228         }
1229 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1230         fwd_cycles = 0;
1231 #endif
1232         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1233                 if (cur_fwd_config.nb_fwd_streams >
1234                     cur_fwd_config.nb_fwd_ports) {
1235                         fwd_stream_stats_display(sm_id);
1236                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1237                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1238                 } else {
1239                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1240                                 fwd_streams[sm_id];
1241                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1242                                 fwd_streams[sm_id];
1243                 }
1244                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1245                 tx_dropped = (uint64_t) (tx_dropped +
1246                                          fwd_streams[sm_id]->fwd_dropped);
1247                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1248
1249                 rx_bad_ip_csum =
1250                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1251                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1252                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1253                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1254                                                         rx_bad_ip_csum;
1255
1256                 rx_bad_l4_csum =
1257                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1258                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1259                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1260                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1261                                                         rx_bad_l4_csum;
1262
1263 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1264                 fwd_cycles = (uint64_t) (fwd_cycles +
1265                                          fwd_streams[sm_id]->core_cycles);
1266 #endif
1267         }
1268         total_recv = 0;
1269         total_xmit = 0;
1270         total_rx_dropped = 0;
1271         total_tx_dropped = 0;
1272         total_rx_nombuf  = 0;
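        /*
         * Subtract the statistics snapshot taken in start_packet_forwarding()
         * from the current port statistics, so that only the packets of this
         * run are accumulated and displayed.
         */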
1273         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1274                 pt_id = fwd_ports_ids[i];
1275
1276                 port = &ports[pt_id];
1277                 rte_eth_stats_get(pt_id, &stats);
1278                 stats.ipackets -= port->stats.ipackets;
1279                 port->stats.ipackets = 0;
1280                 stats.opackets -= port->stats.opackets;
1281                 port->stats.opackets = 0;
1282                 stats.ibytes   -= port->stats.ibytes;
1283                 port->stats.ibytes = 0;
1284                 stats.obytes   -= port->stats.obytes;
1285                 port->stats.obytes = 0;
1286                 stats.imissed  -= port->stats.imissed;
1287                 port->stats.imissed = 0;
1288                 stats.oerrors  -= port->stats.oerrors;
1289                 port->stats.oerrors = 0;
1290                 stats.rx_nombuf -= port->stats.rx_nombuf;
1291                 port->stats.rx_nombuf = 0;
1292
1293                 total_recv += stats.ipackets;
1294                 total_xmit += stats.opackets;
1295                 total_rx_dropped += stats.imissed;
1296                 total_tx_dropped += port->tx_dropped;
1297                 total_rx_nombuf  += stats.rx_nombuf;
1298
1299                 fwd_port_stats_display(pt_id, &stats);
1300         }
1301         printf("\n  %s Accumulated forward statistics for all ports"
1302                "%s\n",
1303                acc_stats_border, acc_stats_border);
1304         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1305                "%-"PRIu64"\n"
1306                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1307                "%-"PRIu64"\n",
1308                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1309                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1310         if (total_rx_nombuf > 0)
1311                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1312         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1313                "%s\n",
1314                acc_stats_border, acc_stats_border);
1315 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1316         if (total_recv > 0)
1317                 printf("\n  CPU cycles/packet=%u (total cycles="
1318                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1319                        (unsigned int)(fwd_cycles / total_recv),
1320                        fwd_cycles, total_recv);
1321 #endif
1322         printf("\nDone.\n");
1323         test_done = 1;
1324 }
1325
1326 void
1327 dev_set_link_up(portid_t pid)
1328 {
1329         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1330                 printf("\nFailed to set link up.\n");
1331 }
1332
1333 void
1334 dev_set_link_down(portid_t pid)
1335 {
1336         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1337                 printf("\nFailed to set link down.\n");
1338 }
1339
1340 static int
1341 all_ports_started(void)
1342 {
1343         portid_t pi;
1344         struct rte_port *port;
1345
1346         RTE_ETH_FOREACH_DEV(pi) {
1347                 port = &ports[pi];
1348                 /* Check if there is a port which is not started */
1349                 if ((port->port_status != RTE_PORT_STARTED) &&
1350                         (port->slave_flag == 0))
1351                         return 0;
1352         }
1353
1354         /* All ports are started */
1355         return 1;
1356 }
1357
1358 int
1359 all_ports_stopped(void)
1360 {
1361         portid_t pi;
1362         struct rte_port *port;
1363
1364         RTE_ETH_FOREACH_DEV(pi) {
1365                 port = &ports[pi];
1366                 if ((port->port_status != RTE_PORT_STOPPED) &&
1367                         (port->slave_flag == 0))
1368                         return 0;
1369         }
1370
1371         return 1;
1372 }
1373
1374 int
1375 port_is_started(portid_t port_id)
1376 {
1377         if (port_id_is_invalid(port_id, ENABLED_WARN))
1378                 return 0;
1379
1380         if (ports[port_id].port_status != RTE_PORT_STARTED)
1381                 return 0;
1382
1383         return 1;
1384 }
1385
1386 static int
1387 port_is_closed(portid_t port_id)
1388 {
1389         if (port_id_is_invalid(port_id, ENABLED_WARN))
1390                 return 0;
1391
1392         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1393                 return 0;
1394
1395         return 1;
1396 }
1397
1398 int
1399 start_port(portid_t pid)
1400 {
1401         int diag, need_check_link_status = -1;
1402         portid_t pi;
1403         queueid_t qi;
1404         struct rte_port *port;
1405         struct ether_addr mac_addr;
1406         enum rte_eth_event_type event_type;
1407
1408         if (port_id_is_invalid(pid, ENABLED_WARN))
1409                 return 0;
1410
1411         if(dcb_config)
1412                 dcb_test = 1;
1413         RTE_ETH_FOREACH_DEV(pi) {
1414                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1415                         continue;
1416
1417                 need_check_link_status = 0;
1418                 port = &ports[pi];
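                /*
                 * The port state is updated with an atomic compare-and-set:
                 * only a port currently in RTE_PORT_STOPPED state moves to
                 * RTE_PORT_HANDLING here; a port that is already started or
                 * being handled elsewhere is skipped.
                 */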
1419                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1420                                                  RTE_PORT_HANDLING) == 0) {
1421                         printf("Port %d is not stopped\n", pi);
1422                         continue;
1423                 }
1424
1425                 if (port->need_reconfig > 0) {
1426                         port->need_reconfig = 0;
1427
1428                         printf("Configuring Port %d (socket %u)\n", pi,
1429                                         port->socket_id);
1430                         /* configure port */
1431                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1432                                                 &(port->dev_conf));
1433                         if (diag != 0) {
1434                                 if (rte_atomic16_cmpset(&(port->port_status),
1435                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1436                                         printf("Port %d can not be set back "
1437                                                         "to stopped\n", pi);
1438                                 printf("Fail to configure port %d\n", pi);
1439                                 /* try to reconfigure port next time */
1440                                 port->need_reconfig = 1;
1441                                 return -1;
1442                         }
1443                 }
1444                 if (port->need_reconfig_queues > 0) {
1445                         port->need_reconfig_queues = 0;
1446                         /* setup tx queues */
1447                         for (qi = 0; qi < nb_txq; qi++) {
1448                                 if ((numa_support) &&
1449                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1450                                         diag = rte_eth_tx_queue_setup(pi, qi,
1451                                                 nb_txd, txring_numa[pi],
1452                                                 &(port->tx_conf));
1453                                 else
1454                                         diag = rte_eth_tx_queue_setup(pi, qi,
1455                                                 nb_txd, port->socket_id,
1456                                                 &(port->tx_conf));
1457
1458                                 if (diag == 0)
1459                                         continue;
1460
1461                                 /* Failed to set up a TX queue, return */
1462                                 if (rte_atomic16_cmpset(&(port->port_status),
1463                                                         RTE_PORT_HANDLING,
1464                                                         RTE_PORT_STOPPED) == 0)
1465                                         printf("Port %d cannot be set back "
1466                                                         "to stopped\n", pi);
1467                                 printf("Failed to configure port %d TX queues\n", pi);
1468                                 /* try to reconfigure queues next time */
1469                                 port->need_reconfig_queues = 1;
1470                                 return -1;
1471                         }
1472                         /* setup rx queues */
1473                         for (qi = 0; qi < nb_rxq; qi++) {
1474                                 if ((numa_support) &&
1475                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1476                                         struct rte_mempool *mp =
1477                                                 mbuf_pool_find(rxring_numa[pi]);
1478                                         if (mp == NULL) {
1479                                                 printf("Failed to set up RX queue: "
1480                                                         "no mempool allocated"
1481                                                         " on socket %d\n",
1482                                                         rxring_numa[pi]);
1483                                                 return -1;
1484                                         }
1485
1486                                         diag = rte_eth_rx_queue_setup(pi, qi,
1487                                              nb_rxd, rxring_numa[pi],
1488                                              &(port->rx_conf), mp);
1489                                 } else {
1490                                         struct rte_mempool *mp =
1491                                                 mbuf_pool_find(port->socket_id);
1492                                         if (mp == NULL) {
1493                                                 printf("Failed to set up RX queue: "
1494                                                         "no mempool allocated"
1495                                                         " on socket %d\n",
1496                                                         port->socket_id);
1497                                                 return -1;
1498                                         }
1499                                         diag = rte_eth_rx_queue_setup(pi, qi,
1500                                              nb_rxd, port->socket_id,
1501                                              &(port->rx_conf), mp);
1502                                 }
1503                                 if (diag == 0)
1504                                         continue;
1505
1506                                 /* Failed to set up an RX queue, return */
1507                                 if (rte_atomic16_cmpset(&(port->port_status),
1508                                                         RTE_PORT_HANDLING,
1509                                                         RTE_PORT_STOPPED) == 0)
1510                                         printf("Port %d cannot be set back "
1511                                                         "to stopped\n", pi);
1512                                 printf("Failed to configure port %d RX queues\n", pi);
1513                                 /* try to reconfigure queues next time */
1514                                 port->need_reconfig_queues = 1;
1515                                 return -1;
1516                         }
1517                 }
1518
1519                 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1520                      event_type < RTE_ETH_EVENT_MAX;
1521                      event_type++) {
1522                         diag = rte_eth_dev_callback_register(pi,
1523                                                         event_type,
1524                                                         eth_event_callback,
1525                                                         NULL);
1526                         if (diag) {
1527                                 printf("Failed to set up event callback for event %d\n",
1528                                         event_type);
1529                                 return -1;
1530                         }
1531                 }
1532
1533                 /* start port */
1534                 if (rte_eth_dev_start(pi) < 0) {
1535                         printf("Failed to start port %d\n", pi);
1536
1537                         /* Failed to start the port, try to set it back to stopped */
1538                         if (rte_atomic16_cmpset(&(port->port_status),
1539                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1540                                 printf("Port %d cannot be set back to "
1541                                                         "stopped\n", pi);
1542                         continue;
1543                 }
1544
1545                 if (rte_atomic16_cmpset(&(port->port_status),
1546                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1547                         printf("Port %d cannot be set to started\n", pi);
1548
1549                 rte_eth_macaddr_get(pi, &mac_addr);
1550                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1551                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1552                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1553                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1554
1555                 /* at least one port was started, so the link status needs checking */
1556                 need_check_link_status = 1;
1557         }
1558
1559         if (need_check_link_status == 1 && !no_link_check)
1560                 check_all_ports_link_status(RTE_PORT_ALL);
1561         else if (need_check_link_status == 0)
1562                 printf("Please stop the ports first\n");
1563
1564         printf("Done\n");
1565         return 0;
1566 }
1567
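     /*
      * Stop the port identified by pid, or every port when pid is
      * RTE_PORT_ALL.  Ports that are still forwarding or that act as
      * bonding slaves are skipped.
      */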
1568 void
1569 stop_port(portid_t pid)
1570 {
1571         portid_t pi;
1572         struct rte_port *port;
1573         int need_check_link_status = 0;
1574
1575         if (dcb_test) {
1576                 dcb_test = 0;
1577                 dcb_config = 0;
1578         }
1579
1580         if (port_id_is_invalid(pid, ENABLED_WARN))
1581                 return;
1582
1583         printf("Stopping ports...\n");
1584
1585         RTE_ETH_FOREACH_DEV(pi) {
1586                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1587                         continue;
1588
1589                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1590                         printf("Please remove port %d from forwarding configuration.\n", pi);
1591                         continue;
1592                 }
1593
1594                 if (port_is_bonding_slave(pi)) {
1595                         printf("Please remove port %d from bonded device.\n", pi);
1596                         continue;
1597                 }
1598
1599                 port = &ports[pi];
1600                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1601                                                 RTE_PORT_HANDLING) == 0)
1602                         continue;
1603
1604                 rte_eth_dev_stop(pi);
1605
1606                 if (rte_atomic16_cmpset(&(port->port_status),
1607                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1608                         printf("Port %d cannot be set to stopped\n", pi);
1609                 need_check_link_status = 1;
1610         }
1611         if (need_check_link_status && !no_link_check)
1612                 check_all_ports_link_status(RTE_PORT_ALL);
1613
1614         printf("Done\n");
1615 }
1616
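     /*
      * Close the port identified by pid, or every port when pid is
      * RTE_PORT_ALL.  Flow rules are flushed before rte_eth_dev_close()
      * is called; the port must already be stopped.
      */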
1617 void
1618 close_port(portid_t pid)
1619 {
1620         portid_t pi;
1621         struct rte_port *port;
1622
1623         if (port_id_is_invalid(pid, ENABLED_WARN))
1624                 return;
1625
1626         printf("Closing ports...\n");
1627
1628         RTE_ETH_FOREACH_DEV(pi) {
1629                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1630                         continue;
1631
1632                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1633                         printf("Please remove port %d from forwarding configuration.\n", pi);
1634                         continue;
1635                 }
1636
1637                 if (port_is_bonding_slave(pi)) {
1638                         printf("Please remove port %d from bonded device.\n", pi);
1639                         continue;
1640                 }
1641
1642                 port = &ports[pi];
1643                 if (rte_atomic16_cmpset(&(port->port_status),
1644                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1645                         printf("Port %d is already closed\n", pi);
1646                         continue;
1647                 }
1648
1649                 if (rte_atomic16_cmpset(&(port->port_status),
1650                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1651                         printf("Port %d is not stopped\n", pi);
1652                         continue;
1653                 }
1654
1655                 if (port->flow_list)
1656                         port_flow_flush(pi);
1657                 rte_eth_dev_close(pi);
1658
1659                 if (rte_atomic16_cmpset(&(port->port_status),
1660                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1661                         printf("Port %d cannot be set to closed\n", pi);
1662         }
1663
1664         printf("Done\n");
1665 }
1666
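     /*
      * Hot-plug the device described by the devargs identifier and leave
      * the resulting port in the stopped state with promiscuous mode on.
      */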
1667 void
1668 attach_port(char *identifier)
1669 {
1670         portid_t pi = 0;
1671         unsigned int socket_id;
1672
1673         printf("Attaching a new port...\n");
1674
1675         if (identifier == NULL) {
1676                 printf("Invalid parameter specified\n");
1677                 return;
1678         }
1679
1680         if (rte_eth_dev_attach(identifier, &pi))
1681                 return;
1682
1683         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1684         /* if socket_id is invalid, set to 0 */
1685         if (check_socket_id(socket_id) < 0)
1686                 socket_id = 0;
1687         reconfig(pi, socket_id);
1688         rte_eth_promiscuous_enable(pi);
1689
1690         nb_ports = rte_eth_dev_count();
1691
1692         ports[pi].port_status = RTE_PORT_STOPPED;
1693
1694         printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1695         printf("Done\n");
1696 }
1697
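     /* Hot-unplug a port; the port must have been closed beforehand. */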
1698 void
1699 detach_port(uint8_t port_id)
1700 {
1701         char name[RTE_ETH_NAME_MAX_LEN];
1702
1703         printf("Detaching a port...\n");
1704
1705         if (!port_is_closed(port_id)) {
1706                 printf("Please close port first\n");
1707                 return;
1708         }
1709
1710         if (ports[port_id].flow_list)
1711                 port_flow_flush(port_id);
1712
1713         if (rte_eth_dev_detach(port_id, name))
1714                 return;
1715
1716         nb_ports = rte_eth_dev_count();
1717
1718         printf("Port '%s' is detached. Total number of ports is now %d\n",
1719                         name, nb_ports);
1720         printf("Done\n");
1721         return;
1722 }
1723
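     /* Stop forwarding, then stop and close every port before exiting. */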
1724 void
1725 pmd_test_exit(void)
1726 {
1727         portid_t pt_id;
1728
1729         if (test_done == 0)
1730                 stop_packet_forwarding();
1731
1732         if (ports != NULL) {
1733                 no_link_check = 1;
1734                 RTE_ETH_FOREACH_DEV(pt_id) {
1735                         printf("\nShutting down port %d...\n", pt_id);
1736                         fflush(stdout);
1737                         stop_port(pt_id);
1738                         close_port(pt_id);
1739                 }
1740         }
1741         printf("\nBye...\n");
1742 }
1743
1744 typedef void (*cmd_func_t)(void);
1745 struct pmd_test_command {
1746         const char *cmd_name;
1747         cmd_func_t cmd_func;
1748 };
1749
1750 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1751
1752 /* Check the link status of all ports for up to 9 s and print the final status */
1753 static void
1754 check_all_ports_link_status(uint32_t port_mask)
1755 {
1756 #define CHECK_INTERVAL 100 /* 100ms */
1757 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1758         uint8_t portid, count, all_ports_up, print_flag = 0;
1759         struct rte_eth_link link;
1760
1761         printf("Checking link statuses...\n");
1762         fflush(stdout);
1763         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1764                 all_ports_up = 1;
1765                 RTE_ETH_FOREACH_DEV(portid) {
1766                         if ((port_mask & (1 << portid)) == 0)
1767                                 continue;
1768                         memset(&link, 0, sizeof(link));
1769                         rte_eth_link_get_nowait(portid, &link);
1770                         /* print link status if flag set */
1771                         if (print_flag == 1) {
1772                                 if (link.link_status)
1773                                         printf("Port %d Link Up - speed %u "
1774                                                 "Mbps - %s\n", (uint8_t)portid,
1775                                                 (unsigned)link.link_speed,
1776                                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1777                                                 ("full-duplex") : ("half-duplex"));
1778                                 else
1779                                         printf("Port %d Link Down\n",
1780                                                 (uint8_t)portid);
1781                                 continue;
1782                         }
1783                         /* clear all_ports_up flag if any link down */
1784                         if (link.link_status == ETH_LINK_DOWN) {
1785                                 all_ports_up = 0;
1786                                 break;
1787                         }
1788                 }
1789                 /* after finally printing all link status, get out */
1790                 if (print_flag == 1)
1791                         break;
1792
1793                 if (all_ports_up == 0) {
1794                         fflush(stdout);
1795                         rte_delay_ms(CHECK_INTERVAL);
1796                 }
1797
1798                 /* set the print_flag if all ports up or timeout */
1799                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1800                         print_flag = 1;
1801                 }
1802
1803                 if (lsc_interrupt)
1804                         break;
1805         }
1806 }
1807
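     /*
      * Deferred handler for a device-removal interrupt: stop and close
      * the port, then detach the underlying device from the EAL.
      */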
1808 static void
1809 rmv_event_callback(void *arg)
1810 {
1811         struct rte_eth_dev *dev;
1812         struct rte_devargs *da;
1813         char name[32] = "";
1814         uint8_t port_id = (intptr_t)arg;
1815
1816         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1817         dev = &rte_eth_devices[port_id];
1818         da = dev->device->devargs;
1819
1820         stop_port(port_id);
1821         close_port(port_id);
1822         if (da->type == RTE_DEVTYPE_VIRTUAL)
1823                 snprintf(name, sizeof(name), "%s", da->virt.drv_name);
1824         else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
1825                 rte_pci_device_name(&da->pci.addr, name, sizeof(name));
1826         printf("removing device %s\n", name);
1827         rte_eal_dev_detach(name);
1828         dev->state = RTE_ETH_DEV_UNUSED;
1829 }
1830
1831 /* This function is used by the interrupt thread */
1832 static void
1833 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
1834 {
1835         static const char * const event_desc[] = {
1836                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1837                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1838                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1839                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1840                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1841                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1842                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1843                 [RTE_ETH_EVENT_MAX] = NULL,
1844         };
1845
1846         RTE_SET_USED(param);
1847
1848         if (type >= RTE_ETH_EVENT_MAX) {
1849                 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1850                         port_id, __func__, type);
1851                 fflush(stderr);
1852         } else if (event_print_mask & (UINT32_C(1) << type)) {
1853                 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1854                         event_desc[type]);
1855                 fflush(stdout);
1856         }
1857
1858         switch (type) {
1859         case RTE_ETH_EVENT_INTR_RMV:
1860                 if (rte_eal_alarm_set(100000,
1861                                 rmv_event_callback, (void *)(intptr_t)port_id))
1862                         fprintf(stderr, "Could not set up deferred device removal\n");
1863                 break;
1864         default:
1865                 break;
1866         }
1867 }
1868
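     /*
      * Program the user-supplied TX queue to statistics-counter mappings
      * for one port and remember whether at least one mapping was applied.
      */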
1869 static int
1870 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1871 {
1872         uint16_t i;
1873         int diag;
1874         uint8_t mapping_found = 0;
1875
1876         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1877                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1878                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1879                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1880                                         tx_queue_stats_mappings[i].queue_id,
1881                                         tx_queue_stats_mappings[i].stats_counter_id);
1882                         if (diag != 0)
1883                                 return diag;
1884                         mapping_found = 1;
1885                 }
1886         }
1887         if (mapping_found)
1888                 port->tx_queue_stats_mapping_enabled = 1;
1889         return 0;
1890 }
1891
1892 static int
1893 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1894 {
1895         uint16_t i;
1896         int diag;
1897         uint8_t mapping_found = 0;
1898
1899         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1900                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1901                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1902                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1903                                         rx_queue_stats_mappings[i].queue_id,
1904                                         rx_queue_stats_mappings[i].stats_counter_id);
1905                         if (diag != 0)
1906                                 return diag;
1907                         mapping_found = 1;
1908                 }
1909         }
1910         if (mapping_found)
1911                 port->rx_queue_stats_mapping_enabled = 1;
1912         return 0;
1913 }
1914
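     /*
      * Apply both the TX and RX queue statistics mappings for a port.
      * A -ENOTSUP return from the PMD only disables the feature; any
      * other error aborts testpmd.
      */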
1915 static void
1916 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1917 {
1918         int diag = 0;
1919
1920         diag = set_tx_queue_stats_mapping_registers(pi, port);
1921         if (diag != 0) {
1922                 if (diag == -ENOTSUP) {
1923                         port->tx_queue_stats_mapping_enabled = 0;
1924                         printf("TX queue stats mapping not supported on port id=%d\n", pi);
1925                 }
1926                 else
1927                         rte_exit(EXIT_FAILURE,
1928                                         "set_tx_queue_stats_mapping_registers "
1929                                         "failed for port id=%d diag=%d\n",
1930                                         pi, diag);
1931         }
1932
1933         diag = set_rx_queue_stats_mapping_registers(pi, port);
1934         if (diag != 0) {
1935                 if (diag == -ENOTSUP) {
1936                         port->rx_queue_stats_mapping_enabled = 0;
1937                         printf("RX queue stats mapping not supported on port id=%d\n", pi);
1938                 }
1939                 else
1940                         rte_exit(EXIT_FAILURE,
1941                                         "set_rx_queue_stats_mapping_registers "
1942                                         "failed for port id=%d diag=%d\n",
1943                                         pi, diag);
1944         }
1945 }
1946
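     /*
      * Start from the PMD's default RX/TX queue configuration and
      * override only the thresholds and flags given on the command line.
      */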
1947 static void
1948 rxtx_port_config(struct rte_port *port)
1949 {
1950         port->rx_conf = port->dev_info.default_rxconf;
1951         port->tx_conf = port->dev_info.default_txconf;
1952
1953         /* Check if any RX/TX parameters have been passed */
1954         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1955                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1956
1957         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1958                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1959
1960         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1961                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1962
1963         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1964                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1965
1966         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1967                 port->rx_conf.rx_drop_en = rx_drop_en;
1968
1969         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1970                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1971
1972         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1973                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1974
1975         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1976                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1977
1978         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1979                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1980
1981         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1982                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1983
1984         if (txq_flags != RTE_PMD_PARAM_UNSET)
1985                 port->tx_conf.txq_flags = txq_flags;
1986 }
1987
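     /*
      * Set the default device configuration (RX mode, flow director, RSS)
      * for every port and enable the LSC/RMV interrupts when requested
      * and supported by the device.
      */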
1988 void
1989 init_port_config(void)
1990 {
1991         portid_t pid;
1992         struct rte_port *port;
1993
1994         RTE_ETH_FOREACH_DEV(pid) {
1995                 port = &ports[pid];
1996                 port->dev_conf.rxmode = rx_mode;
1997                 port->dev_conf.fdir_conf = fdir_conf;
1998                 if (nb_rxq > 1) {
1999                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2000                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2001                 } else {
2002                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2003                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2004                 }
2005
2006                 if (port->dcb_flag == 0) {
2007                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2008                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2009                         else
2010                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2011                 }
2012
2013                 rxtx_port_config(port);
2014
2015                 rte_eth_macaddr_get(pid, &port->eth_addr);
2016
2017                 map_port_queue_stats_mapping_registers(pid, port);
2018 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2019                 rte_pmd_ixgbe_bypass_init(pid);
2020 #endif
2021
2022                 if (lsc_interrupt &&
2023                     (rte_eth_devices[pid].data->dev_flags &
2024                      RTE_ETH_DEV_INTR_LSC))
2025                         port->dev_conf.intr_conf.lsc = 1;
2026                 if (rmv_interrupt &&
2027                     (rte_eth_devices[pid].data->dev_flags &
2028                      RTE_ETH_DEV_INTR_RMV))
2029                         port->dev_conf.intr_conf.rmv = 1;
2030         }
2031 }
2032
2033 void set_port_slave_flag(portid_t slave_pid)
2034 {
2035         struct rte_port *port;
2036
2037         port = &ports[slave_pid];
2038         port->slave_flag = 1;
2039 }
2040
2041 void clear_port_slave_flag(portid_t slave_pid)
2042 {
2043         struct rte_port *port;
2044
2045         port = &ports[slave_pid];
2046         port->slave_flag = 0;
2047 }
2048
2049 uint8_t port_is_bonding_slave(portid_t slave_pid)
2050 {
2051         struct rte_port *port;
2052
2053         port = &ports[slave_pid];
2054         return port->slave_flag;
2055 }
2056
2057 const uint16_t vlan_tags[] = {
2058                 0,  1,  2,  3,  4,  5,  6,  7,
2059                 8,  9, 10, 11,  12, 13, 14, 15,
2060                 16, 17, 18, 19, 20, 21, 22, 23,
2061                 24, 25, 26, 27, 28, 29, 30, 31
2062 };
2063
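     /*
      * Build a DCB (optionally VMDq+DCB) configuration in *eth_conf.
      * In DCB_VT_ENABLED mode with ETH_4_TCS, for instance, 32 pools are
      * created and vlan_tags[i] is mapped to pool i.
      */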
2064 static int
2065 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2066                  enum dcb_mode_enable dcb_mode,
2067                  enum rte_eth_nb_tcs num_tcs,
2068                  uint8_t pfc_en)
2069 {
2070         uint8_t i;
2071
2072         /*
2073          * Build the configuration for DCB+VT based on the vlan_tags array
2074          * given above and on the number of traffic classes available for use.
2075          */
2076         if (dcb_mode == DCB_VT_ENABLED) {
2077                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2078                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2079                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2080                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2081
2082                 /* VMDQ+DCB RX and TX configurations */
2083                 vmdq_rx_conf->enable_default_pool = 0;
2084                 vmdq_rx_conf->default_pool = 0;
2085                 vmdq_rx_conf->nb_queue_pools =
2086                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2087                 vmdq_tx_conf->nb_queue_pools =
2088                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2089
2090                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2091                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2092                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2093                         vmdq_rx_conf->pool_map[i].pools =
2094                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
2095                 }
2096                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2097                         vmdq_rx_conf->dcb_tc[i] = i;
2098                         vmdq_tx_conf->dcb_tc[i] = i;
2099                 }
2100
2101                 /* set DCB mode of RX and TX of multiple queues */
2102                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2103                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2104         } else {
2105                 struct rte_eth_dcb_rx_conf *rx_conf =
2106                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
2107                 struct rte_eth_dcb_tx_conf *tx_conf =
2108                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
2109
2110                 rx_conf->nb_tcs = num_tcs;
2111                 tx_conf->nb_tcs = num_tcs;
2112
2113                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2114                         rx_conf->dcb_tc[i] = i % num_tcs;
2115                         tx_conf->dcb_tc[i] = i % num_tcs;
2116                 }
2117                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2118                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2119                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2120         }
2121
2122         if (pfc_en)
2123                 eth_conf->dcb_capability_en =
2124                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2125         else
2126                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2127
2128         return 0;
2129 }
2130
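     /*
      * Reconfigure one port for DCB testing: build the DCB configuration,
      * derive the global RX/TX queue counts from the device capabilities
      * and enable VLAN filtering for every entry of vlan_tags[].
      */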
2131 int
2132 init_port_dcb_config(portid_t pid,
2133                      enum dcb_mode_enable dcb_mode,
2134                      enum rte_eth_nb_tcs num_tcs,
2135                      uint8_t pfc_en)
2136 {
2137         struct rte_eth_conf port_conf;
2138         struct rte_port *rte_port;
2139         int retval;
2140         uint16_t i;
2141
2142         rte_port = &ports[pid];
2143
2144         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2145         /* Enter DCB configuration status */
2146         dcb_config = 1;
2147
2148         /* set configuration of DCB in VT mode and DCB in non-VT mode */
2149         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2150         if (retval < 0)
2151                 return retval;
2152         port_conf.rxmode.hw_vlan_filter = 1;
2153
2154         /**
2155          * Write the configuration into the device.
2156          * Set the numbers of RX & TX queues to 0, so
2157          * the RX & TX queues will not be set up.
2158          */
2159         (void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2160
2161         rte_eth_dev_info_get(pid, &rte_port->dev_info);
2162
2163         /* If dev_info.vmdq_pool_base is greater than 0,
2164          * the queue IDs of the VMDq pools start after the PF queues.
2165          */
2166         if (dcb_mode == DCB_VT_ENABLED &&
2167             rte_port->dev_info.vmdq_pool_base > 0) {
2168                 printf("VMDQ_DCB multi-queue mode is nonsensical"
2169                         " for port %d.\n", pid);
2170                 return -1;
2171         }
2172
2173         /* Assume the ports in testpmd have the same DCB capability
2174          * and the same number of RX and TX queues in DCB mode
2175          */
2176         if (dcb_mode == DCB_VT_ENABLED) {
2177                 if (rte_port->dev_info.max_vfs > 0) {
2178                         nb_rxq = rte_port->dev_info.nb_rx_queues;
2179                         nb_txq = rte_port->dev_info.nb_tx_queues;
2180                 } else {
2181                         nb_rxq = rte_port->dev_info.max_rx_queues;
2182                         nb_txq = rte_port->dev_info.max_tx_queues;
2183                 }
2184         } else {
2185                 /* if VT is disabled, use all PF queues */
2186                 if (rte_port->dev_info.vmdq_pool_base == 0) {
2187                         nb_rxq = rte_port->dev_info.max_rx_queues;
2188                         nb_txq = rte_port->dev_info.max_tx_queues;
2189                 } else {
2190                         nb_rxq = (queueid_t)num_tcs;
2191                         nb_txq = (queueid_t)num_tcs;
2192
2193                 }
2194         }
2195         rx_free_thresh = 64;
2196
2197         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2198
2199         rxtx_port_config(rte_port);
2200         /* VLAN filter */
2201         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2202         for (i = 0; i < RTE_DIM(vlan_tags); i++)
2203                 rx_vft_set(pid, vlan_tags[i], 1);
2204
2205         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2206         map_port_queue_stats_mapping_registers(pid, rte_port);
2207
2208         rte_port->dcb_flag = 1;
2209
2210         return 0;
2211 }
2212
2213 static void
2214 init_port(void)
2215 {
2216         /* Configuration of Ethernet ports. */
2217         ports = rte_zmalloc("testpmd: ports",
2218                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2219                             RTE_CACHE_LINE_SIZE);
2220         if (ports == NULL) {
2221                 rte_exit(EXIT_FAILURE,
2222                                 "rte_zmalloc(%d struct rte_port) failed\n",
2223                                 RTE_MAX_ETHPORTS);
2224         }
2225 }
2226
2227 static void
2228 force_quit(void)
2229 {
2230         pmd_test_exit();
2231         prompt_exit();
2232 }
2233
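     /*
      * SIGINT/SIGTERM handler: tear down the capture and latency-stats
      * frameworks (when built in), stop and close all ports, then
      * re-raise the signal so the process exits with the expected status.
      */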
2234 static void
2235 signal_handler(int signum)
2236 {
2237         if (signum == SIGINT || signum == SIGTERM) {
2238                 printf("\nSignal %d received, preparing to exit...\n",
2239                                 signum);
2240 #ifdef RTE_LIBRTE_PDUMP
2241                 /* uninitialize packet capture framework */
2242                 rte_pdump_uninit();
2243 #endif
2244 #ifdef RTE_LIBRTE_LATENCY_STATS
2245                 rte_latencystats_uninit();
2246 #endif
2247                 force_quit();
2248                 /* exit with the expected status */
2249                 signal(signum, SIG_DFL);
2250                 kill(getpid(), signum);
2251         }
2252 }
2253
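     /*
      * testpmd entry point: initialize the EAL, allocate the port array,
      * parse the application arguments, start all ports and then either
      * run the interactive prompt or forward packets until the user
      * presses enter.
      */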
2254 int
2255 main(int argc, char** argv)
2256 {
2257         int  diag;
2258         uint8_t port_id;
2259
2260         signal(SIGINT, signal_handler);
2261         signal(SIGTERM, signal_handler);
2262
2263         diag = rte_eal_init(argc, argv);
2264         if (diag < 0)
2265                 rte_panic("Cannot init EAL\n");
2266
2267 #ifdef RTE_LIBRTE_PDUMP
2268         /* initialize packet capture framework */
2269         rte_pdump_init(NULL);
2270 #endif
2271
2272         nb_ports = (portid_t) rte_eth_dev_count();
2273         if (nb_ports == 0)
2274                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2275
2276         /* allocate port structures, and init them */
2277         init_port();
2278
2279         set_def_fwd_config();
2280         if (nb_lcores == 0)
2281                 rte_panic("Empty set of forwarding logical cores - check the "
2282                           "core mask supplied in the command parameters\n");
2283
2284         /* Bitrate/latency stats disabled by default */
2285 #ifdef RTE_LIBRTE_BITRATE
2286         bitrate_enabled = 0;
2287 #endif
2288 #ifdef RTE_LIBRTE_LATENCY_STATS
2289         latencystats_enabled = 0;
2290 #endif
2291
2292         argc -= diag;
2293         argv += diag;
2294         if (argc > 1)
2295                 launch_args_parse(argc, argv);
2296
2297         if (!nb_rxq && !nb_txq)
2298                 printf("Warning: Either the RX or the TX queue count should be non-zero\n");
2299
2300         if (nb_rxq > 1 && nb_rxq > nb_txq)
2301                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2302                        "but nb_txq=%d will prevent it from being fully tested.\n",
2303                        nb_rxq, nb_txq);
2304
2305         init_config();
2306         if (start_port(RTE_PORT_ALL) != 0)
2307                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2308
2309         /* set all ports to promiscuous mode by default */
2310         RTE_ETH_FOREACH_DEV(port_id)
2311                 rte_eth_promiscuous_enable(port_id);
2312
2313         /* Init metrics library */
2314         rte_metrics_init(rte_socket_id());
2315
2316 #ifdef RTE_LIBRTE_LATENCY_STATS
2317         if (latencystats_enabled != 0) {
2318                 int ret = rte_latencystats_init(1, NULL);
2319                 if (ret)
2320                         printf("Warning: latencystats init()"
2321                                 " returned error %d\n", ret);
2322                 printf("Latencystats running on lcore %d\n",
2323                         latencystats_lcore_id);
2324         }
2325 #endif
2326
2327         /* Setup bitrate stats */
2328 #ifdef RTE_LIBRTE_BITRATE
2329         if (bitrate_enabled != 0) {
2330                 bitrate_data = rte_stats_bitrate_create();
2331                 if (bitrate_data == NULL)
2332                         rte_exit(EXIT_FAILURE,
2333                                 "Could not allocate bitrate data.\n");
2334                 rte_stats_bitrate_reg(bitrate_data);
2335         }
2336 #endif
2337
2338 #ifdef RTE_LIBRTE_CMDLINE
2339         if (strlen(cmdline_filename) != 0)
2340                 cmdline_read_from_file(cmdline_filename);
2341
2342         if (interactive == 1) {
2343                 if (auto_start) {
2344                         printf("Start automatic packet forwarding\n");
2345                         start_packet_forwarding(0);
2346                 }
2347                 prompt();
2348                 pmd_test_exit();
2349         } else
2350 #endif
2351         {
2352                 char c;
2353                 int rc;
2354
2355                 printf("No interactive command line, starting packet forwarding\n");
2356                 start_packet_forwarding(0);
2357                 printf("Press enter to exit\n");
2358                 rc = read(0, &c, 1);
2359                 pmd_test_exit();
2360                 if (rc < 0)
2361                         return 1;
2362         }
2363
2364         return 0;
2365 }