1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
78 #endif
79
80 #include "testpmd.h"
81
82 uint16_t verbose_level = 0; /**< Silent by default. */
83
84 /* use master core for command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
87
88 /*
89  * NUMA support configuration.
90  * When set, the NUMA support attempts to dispatch the allocation of the
91  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92  * probed ports among the CPU sockets 0 and 1.
93  * Otherwise, all memory is allocated from CPU socket 0.
94  */
95 uint8_t numa_support = 0; /**< No numa support by default */
96
97 /*
98  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99  * not configured.
100  */
101 uint8_t socket_num = UMA_NO_CONFIG;
102
103 /*
104  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
105  */
106 uint8_t mp_anon = 0;
107
108 /*
109  * Record the Ethernet address of peer target ports to which packets are
110  * forwarded.
111  * Must be instantiated with the Ethernet addresses of peer traffic generator
112  * ports.
113  */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
116
117 /*
118  * Probed Target Environment.
119  */
120 struct rte_port *ports;        /**< For all probed ethernet ports. */
121 portid_t nb_ports;             /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124
125 /*
126  * Test Forwarding Configuration.
127  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129  */
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
134
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
140
141 /*
142  * Forwarding engines.
143  */
144 struct fwd_engine * fwd_engines[] = {
145         &io_fwd_engine,
146         &mac_fwd_engine,
147         &mac_retry_fwd_engine,
148         &mac_swap_engine,
149         &flow_gen_engine,
150         &rx_only_engine,
151         &tx_only_engine,
152         &csum_fwd_engine,
153         &icmp_echo_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155         &ieee1588_fwd_engine,
156 #endif
157         NULL,
158 };
159
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
162
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
165                                       * specified on command-line. */
166
167 /*
168  * Configuration of packet segments used by the "txonly" processing engine.
169  */
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172         TXONLY_DEF_PACKET_LEN,
173 };
174 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
175
176 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
177 /**< Split policy for packets to TX. */
178
179 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
180 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
181
182 /* whether the current configuration is in DCB mode; 0 means not in DCB mode */
183 uint8_t dcb_config = 0;
184
185 /* Whether DCB is currently under test */
186 uint8_t dcb_test = 0;
187
188 /*
189  * Configurable number of RX/TX queues.
190  */
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193
194 /*
195  * Configurable number of RX/TX ring descriptors.
196  */
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
201
202 #define RTE_PMD_PARAM_UNSET -1
203 /*
204  * Configurable values of RX and TX ring threshold registers.
205  */
206
207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
210
211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
214
215 /*
216  * Configurable value of RX free threshold.
217  */
218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
219
220 /*
221  * Configurable value of RX drop enable.
222  */
223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
224
225 /*
226  * Configurable value of TX free threshold.
227  */
228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
229
230 /*
231  * Configurable value of TX RS bit threshold.
232  */
233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
234
235 /*
236  * Configurable value of TX queue flags.
237  */
238 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
239
240 /*
241  * Receive Side Scaling (RSS) configuration.
242  */
243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
244
245 /*
246  * Port topology configuration
247  */
248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
249
250 /*
251  * Avoid flushing all the RX streams before starting forwarding.
252  */
253 uint8_t no_flush_rx = 0; /* flush by default */
254
255 /*
256  * Avoid checking the link status when starting/stopping a port.
257  */
258 uint8_t no_link_check = 0; /* check by default */
259
260 /*
261  * NIC bypass mode configuration options.
262  */
263 #ifdef RTE_NIC_BYPASS
264
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
267
268 #endif
269
270 /*
271  * Ethernet device configuration.
272  */
273 struct rte_eth_rxmode rx_mode = {
274         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
275         .split_hdr_size = 0,
276         .header_split   = 0, /**< Header Split disabled. */
277         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
280         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
282         .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
283 };
284
285 struct rte_fdir_conf fdir_conf = {
286         .mode = RTE_FDIR_MODE_NONE,
287         .pballoc = RTE_FDIR_PBALLOC_64K,
288         .status = RTE_FDIR_REPORT_STATUS,
289         .mask = {
290                 .vlan_tci_mask = 0x0,
291                 .ipv4_mask     = {
292                         .src_ip = 0xFFFFFFFF,
293                         .dst_ip = 0xFFFFFFFF,
294                 },
295                 .ipv6_mask     = {
296                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
297                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
298                 },
299                 .src_port_mask = 0xFFFF,
300                 .dst_port_mask = 0xFFFF,
301                 .mac_addr_byte_mask = 0xFF,
302                 .tunnel_type_mask = 1,
303                 .tunnel_id_mask = 0xFFFFFFFF,
304         },
305         .drop_queue = 127,
306 };
307
308 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
309
310 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
311 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
312
313 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
314 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
315
316 uint16_t nb_tx_queue_stats_mappings = 0;
317 uint16_t nb_rx_queue_stats_mappings = 0;
318
319 unsigned max_socket = 0;
320
321 /* Forward function declarations */
322 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
323 static void check_all_ports_link_status(uint32_t port_mask);
324
325 /*
326  * Check if all the ports are started.
327  * If so, return a positive value; otherwise, return zero.
328  */
329 static int all_ports_started(void);
330
331 /*
332  * Find next enabled port
333  */
334 portid_t
335 find_next_port(portid_t p, struct rte_port *ports, int size)
336 {
337         if (ports == NULL)
338                 rte_exit(-EINVAL, "failed to find a next port id\n");
339
340         while ((p < size) && (ports[p].enabled == 0))
341                 p++;
342         return p;
343 }
344
345 /*
346  * Setup default configuration.
347  */
348 static void
349 set_default_fwd_lcores_config(void)
350 {
351         unsigned int i;
352         unsigned int nb_lc;
353         unsigned int sock_num;
354
355         nb_lc = 0;
356         for (i = 0; i < RTE_MAX_LCORE; i++) {
357                 sock_num = rte_lcore_to_socket_id(i) + 1;
358                 if (sock_num > max_socket) {
359                         if (sock_num > RTE_MAX_NUMA_NODES)
360                                 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
361                         max_socket = sock_num;
362                 }
363                 if (!rte_lcore_is_enabled(i))
364                         continue;
365                 if (i == rte_get_master_lcore())
366                         continue;
367                 fwd_lcores_cpuids[nb_lc++] = i;
368         }
369         nb_lcores = (lcoreid_t) nb_lc;
370         nb_cfg_lcores = nb_lcores;
371         nb_fwd_lcores = 1;
372 }
373
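/*
 * Initialize the Ethernet addresses of the peer target ports to locally
 * administered addresses whose last byte is the port index.
 */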
374 static void
375 set_def_peer_eth_addrs(void)
376 {
377         portid_t i;
378
379         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
380                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
381                 peer_eth_addrs[i].addr_bytes[5] = i;
382         }
383 }
384
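/*
 * By default, forward on all probed ports, in probing order.
 */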
385 static void
386 set_default_fwd_ports_config(void)
387 {
388         portid_t pt_id;
389
390         for (pt_id = 0; pt_id < nb_ports; pt_id++)
391                 fwd_ports_ids[pt_id] = pt_id;
392
393         nb_cfg_ports = nb_ports;
394         nb_fwd_ports = nb_ports;
395 }
396
397 void
398 set_def_fwd_config(void)
399 {
400         set_default_fwd_lcores_config();
401         set_def_peer_eth_addrs();
402         set_default_fwd_ports_config();
403 }
404
405 /*
406  * Creation of an mbuf pool for a given socket, done once at init time.
407  */
408 static void
409 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
410                  unsigned int socket_id)
411 {
412         char pool_name[RTE_MEMPOOL_NAMESIZE];
413         struct rte_mempool *rte_mp = NULL;
414         uint32_t mb_size;
415
416         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
417         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
418
419         RTE_LOG(INFO, USER1,
420                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
421                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
422
423 #ifdef RTE_LIBRTE_PMD_XENVIRT
424         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
425                 (unsigned) mb_mempool_cache,
426                 sizeof(struct rte_pktmbuf_pool_private),
427                 rte_pktmbuf_pool_init, NULL,
428                 rte_pktmbuf_init, NULL,
429                 socket_id, 0);
430 #endif
431
432         /* if the former XEN allocation failed, fall back to normal allocation */
433         if (rte_mp == NULL) {
434                 if (mp_anon != 0) {
435                         rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
436                                 mb_size, (unsigned) mb_mempool_cache,
437                                 sizeof(struct rte_pktmbuf_pool_private),
438                                 socket_id, 0);
439
440                         if (rte_mempool_populate_anon(rte_mp) == 0) {
441                                 rte_mempool_free(rte_mp);
442                                 rte_mp = NULL;
443                         }
444                         rte_pktmbuf_pool_init(rte_mp, NULL);
445                         rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
446                 } else {
447                         /* wrapper to rte_mempool_create() */
448                         rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
449                                 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
450                 }
451         }
452
453         if (rte_mp == NULL) {
454                 rte_exit(EXIT_FAILURE,
455                         "Creation of mbuf pool for socket %u failed: %s\n",
456                         socket_id, rte_strerror(rte_errno));
457         } else if (verbose_level > 0) {
458                 rte_mempool_dump(stdout, rte_mp);
459         }
460 }
461
462 /*
463  * Check whether the given socket ID is valid in NUMA mode;
464  * if valid, return 0, else return -1.
465  */
466 static int
467 check_socket_id(const unsigned int socket_id)
468 {
469         static int warning_once = 0;
470
471         if (socket_id >= max_socket) {
472                 if (!warning_once && numa_support)
473                         printf("Warning: NUMA should be configured manually by"
474                                " using --port-numa-config and"
475                                " --ring-numa-config parameters along with"
476                                " --numa.\n");
477                 warning_once = 1;
478                 return -1;
479         }
480         return 0;
481 }
482
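/*
 * Global configuration done once at startup: allocate the forwarding lcore
 * contexts, create the mbuf pool(s), flag every port for (re)configuration
 * and initialize the forwarding streams.
 */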
483 static void
484 init_config(void)
485 {
486         portid_t pid;
487         struct rte_port *port;
488         struct rte_mempool *mbp;
489         unsigned int nb_mbuf_per_pool;
490         lcoreid_t  lc_id;
491         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
492
493         memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
494         /* Configuration of logical cores. */
495         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
496                                 sizeof(struct fwd_lcore *) * nb_lcores,
497                                 RTE_CACHE_LINE_SIZE);
498         if (fwd_lcores == NULL) {
499                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
500                                                         "failed\n", nb_lcores);
501         }
502         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
503                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
504                                                sizeof(struct fwd_lcore),
505                                                RTE_CACHE_LINE_SIZE);
506                 if (fwd_lcores[lc_id] == NULL) {
507                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
508                                                                 "failed\n");
509                 }
510                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
511         }
512
513         /*
514          * Create mbuf pools.
515          * If NUMA support is disabled, create a single mbuf pool in
516          * socket 0 memory by default.
517          * Otherwise, create an mbuf pool in the memory of each socket.
518          *
519          * Size the pools for the maximum number of RX/TX descriptors, so
520          * that nb_rxd and nb_txd can still be changed at run time.
521          */
522         if (param_total_num_mbufs)
523                 nb_mbuf_per_pool = param_total_num_mbufs;
524         else {
525                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
526                                 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
527
528                 if (!numa_support)
529                         nb_mbuf_per_pool =
530                                 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
531         }
532
533         if (!numa_support) {
534                 if (socket_num == UMA_NO_CONFIG)
535                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
536                 else
537                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
538                                                  socket_num);
539         }
540
541         FOREACH_PORT(pid, ports) {
542                 port = &ports[pid];
543                 rte_eth_dev_info_get(pid, &port->dev_info);
544
545                 if (numa_support) {
546                         if (port_numa[pid] != NUMA_NO_CONFIG)
547                                 port_per_socket[port_numa[pid]]++;
548                         else {
549                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
550
551                                 /* if socket_id is invalid, set to 0 */
552                                 if (check_socket_id(socket_id) < 0)
553                                         socket_id = 0;
554                                 port_per_socket[socket_id]++;
555                         }
556                 }
557
558                 /* set flag to initialize port/queue */
559                 port->need_reconfig = 1;
560                 port->need_reconfig_queues = 1;
561         }
562
563         if (numa_support) {
564                 uint8_t i;
565                 unsigned int nb_mbuf;
566
567                 if (param_total_num_mbufs)
568                         nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
569
570                 for (i = 0; i < max_socket; i++) {
571                         nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
572                         if (nb_mbuf)
573                                 mbuf_pool_create(mbuf_data_size,
574                                                 nb_mbuf,i);
575                 }
576         }
577         init_port_config();
578
579         /*
580          * Record which mbuf pool each logical core should use, if needed.
581          */
582         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
583                 mbp = mbuf_pool_find(
584                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
585
586                 if (mbp == NULL)
587                         mbp = mbuf_pool_find(0);
588                 fwd_lcores[lc_id]->mbp = mbp;
589         }
590
591         /* Configuration of packet forwarding streams. */
592         if (init_fwd_streams() < 0)
593                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
594 }
595
596
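/*
 * Refresh the device information of the given port, record its socket and
 * flag it for reconfiguration of its queues.
 */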
597 void
598 reconfig(portid_t new_port_id, unsigned socket_id)
599 {
600         struct rte_port *port;
601
602         /* Reconfiguration of Ethernet ports. */
603         port = &ports[new_port_id];
604         rte_eth_dev_info_get(new_port_id, &port->dev_info);
605
606         /* set flag to initialize port/queue */
607         port->need_reconfig = 1;
608         port->need_reconfig_queues = 1;
609         port->socket_id = socket_id;
610
611         init_port_config();
612 }
613
614
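/*
 * Allocate (or reallocate) the forwarding streams according to the current
 * number of ports and RX/TX queues, and assign a NUMA socket to each port.
 * Return 0 on success, -1 on error.
 */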
615 int
616 init_fwd_streams(void)
617 {
618         portid_t pid;
619         struct rte_port *port;
620         streamid_t sm_id, nb_fwd_streams_new;
621         queueid_t q;
622
623         /* set the socket id according to whether NUMA is enabled */
624         FOREACH_PORT(pid, ports) {
625                 port = &ports[pid];
626                 if (nb_rxq > port->dev_info.max_rx_queues) {
627                         printf("Fail: nb_rxq(%d) is greater than "
628                                 "max_rx_queues(%d)\n", nb_rxq,
629                                 port->dev_info.max_rx_queues);
630                         return -1;
631                 }
632                 if (nb_txq > port->dev_info.max_tx_queues) {
633                         printf("Fail: nb_txq(%d) is greater than "
634                                 "max_tx_queues(%d)\n", nb_txq,
635                                 port->dev_info.max_tx_queues);
636                         return -1;
637                 }
638                 if (numa_support) {
639                         if (port_numa[pid] != NUMA_NO_CONFIG)
640                                 port->socket_id = port_numa[pid];
641                         else {
642                                 port->socket_id = rte_eth_dev_socket_id(pid);
643
644                                 /* if socket_id is invalid, set to 0 */
645                                 if (check_socket_id(port->socket_id) < 0)
646                                         port->socket_id = 0;
647                         }
648                 }
649                 else {
650                         if (socket_num == UMA_NO_CONFIG)
651                                 port->socket_id = 0;
652                         else
653                                 port->socket_id = socket_num;
654                 }
655         }
656
657         q = RTE_MAX(nb_rxq, nb_txq);
658         if (q == 0) {
659                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
660                 return -1;
661         }
662         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
663         if (nb_fwd_streams_new == nb_fwd_streams)
664                 return 0;
665         /* clear the old */
666         if (fwd_streams != NULL) {
667                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
668                         if (fwd_streams[sm_id] == NULL)
669                                 continue;
670                         rte_free(fwd_streams[sm_id]);
671                         fwd_streams[sm_id] = NULL;
672                 }
673                 rte_free(fwd_streams);
674                 fwd_streams = NULL;
675         }
676
677         /* init new */
678         nb_fwd_streams = nb_fwd_streams_new;
679         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
680                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
681         if (fwd_streams == NULL)
682                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
683                                                 "failed\n", nb_fwd_streams);
684
685         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
686                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
687                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
688                 if (fwd_streams[sm_id] == NULL)
689                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
690                                                                 " failed\n");
691         }
692
693         return 0;
694 }
695
696 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
697 static void
698 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
699 {
700         unsigned int total_burst;
701         unsigned int nb_burst;
702         unsigned int burst_stats[3];
703         uint16_t pktnb_stats[3];
704         uint16_t nb_pkt;
705         int burst_percent[3];
706
707         /*
708          * First compute the total number of packet bursts and the
709          * two highest numbers of bursts of the same number of packets.
710          */
711         total_burst = 0;
712         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
713         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
714         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
715                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
716                 if (nb_burst == 0)
717                         continue;
718                 total_burst += nb_burst;
719                 if (nb_burst > burst_stats[0]) {
720                         burst_stats[1] = burst_stats[0];
721                         pktnb_stats[1] = pktnb_stats[0];
722                         burst_stats[0] = nb_burst;
723                         pktnb_stats[0] = nb_pkt;
724                 }
725         }
726         if (total_burst == 0)
727                 return;
728         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
729         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
730                burst_percent[0], (int) pktnb_stats[0]);
731         if (burst_stats[0] == total_burst) {
732                 printf("]\n");
733                 return;
734         }
735         if (burst_stats[0] + burst_stats[1] == total_burst) {
736                 printf(" + %d%% of %d pkts]\n",
737                        100 - burst_percent[0], pktnb_stats[1]);
738                 return;
739         }
740         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
741         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
742         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
743                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
744                 return;
745         }
746         printf(" + %d%% of %d pkts + %d%% of others]\n",
747                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
748 }
749 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
750
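/*
 * Display the forwarding statistics accumulated on the given port.
 */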
751 static void
752 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
753 {
754         struct rte_port *port;
755         uint8_t i;
756
757         static const char *fwd_stats_border = "----------------------";
758
759         port = &ports[port_id];
760         printf("\n  %s Forward statistics for port %-2d %s\n",
761                fwd_stats_border, port_id, fwd_stats_border);
762
763         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
764                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
765                        "%-"PRIu64"\n",
766                        stats->ipackets, stats->imissed,
767                        (uint64_t) (stats->ipackets + stats->imissed));
768
769                 if (cur_fwd_eng == &csum_fwd_engine)
770                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
771                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
772                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
773                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
774                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
775                 }
776
777                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
778                        "%-"PRIu64"\n",
779                        stats->opackets, port->tx_dropped,
780                        (uint64_t) (stats->opackets + port->tx_dropped));
781         }
782         else {
783                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
784                        "%14"PRIu64"\n",
785                        stats->ipackets, stats->imissed,
786                        (uint64_t) (stats->ipackets + stats->imissed));
787
788                 if (cur_fwd_eng == &csum_fwd_engine)
789                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
790                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
791                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
792                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
793                         printf("  RX-nombufs:             %14"PRIu64"\n",
794                                stats->rx_nombuf);
795                 }
796
797                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
798                        "%14"PRIu64"\n",
799                        stats->opackets, port->tx_dropped,
800                        (uint64_t) (stats->opackets + port->tx_dropped));
801         }
802
803 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
804         if (port->rx_stream)
805                 pkt_burst_stats_display("RX",
806                         &port->rx_stream->rx_burst_stats);
807         if (port->tx_stream)
808                 pkt_burst_stats_display("TX",
809                         &port->tx_stream->tx_burst_stats);
810 #endif
811
812         if (port->rx_queue_stats_mapping_enabled) {
813                 printf("\n");
814                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
815                         printf("  Stats reg %2d RX-packets:%14"PRIu64
816                                "     RX-errors:%14"PRIu64
817                                "    RX-bytes:%14"PRIu64"\n",
818                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
819                 }
820                 printf("\n");
821         }
822         if (port->tx_queue_stats_mapping_enabled) {
823                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
824                         printf("  Stats reg %2d TX-packets:%14"PRIu64
825                                "                                 TX-bytes:%14"PRIu64"\n",
826                                i, stats->q_opackets[i], stats->q_obytes[i]);
827                 }
828         }
829
830         printf("  %s--------------------------------%s\n",
831                fwd_stats_border, fwd_stats_border);
832 }
833
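/*
 * Display the statistics of a forwarding stream, if it handled any packet.
 */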
834 static void
835 fwd_stream_stats_display(streamid_t stream_id)
836 {
837         struct fwd_stream *fs;
838         static const char *fwd_top_stats_border = "-------";
839
840         fs = fwd_streams[stream_id];
841         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
842             (fs->fwd_dropped == 0))
843                 return;
844         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
845                "TX Port=%2d/Queue=%2d %s\n",
846                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
847                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
848         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
849                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
850
851         /* if checksum mode */
852         if (cur_fwd_eng == &csum_fwd_engine) {
853                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
854                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
855         }
856
857 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
858         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
859         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
860 #endif
861 }
862
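/*
 * Drain the RX queues of all forwarding ports and free the received mbufs.
 */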
863 static void
864 flush_fwd_rx_queues(void)
865 {
866         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
867         portid_t  rxp;
868         portid_t port_id;
869         queueid_t rxq;
870         uint16_t  nb_rx;
871         uint16_t  i;
872         uint8_t   j;
873
874         for (j = 0; j < 2; j++) {
875                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
876                         for (rxq = 0; rxq < nb_rxq; rxq++) {
877                                 port_id = fwd_ports_ids[rxp];
878                                 do {
879                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
880                                                 pkts_burst, MAX_PKT_BURST);
881                                         for (i = 0; i < nb_rx; i++)
882                                                 rte_pktmbuf_free(pkts_burst[i]);
883                                 } while (nb_rx > 0);
884                         }
885                 }
886                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
887         }
888 }
889
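/*
 * Run the given packet forwarding function on every stream assigned to this
 * logical core until the core is told to stop.
 */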
890 static void
891 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
892 {
893         struct fwd_stream **fsm;
894         streamid_t nb_fs;
895         streamid_t sm_id;
896
897         fsm = &fwd_streams[fc->stream_idx];
898         nb_fs = fc->stream_nb;
899         do {
900                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
901                         (*pkt_fwd)(fsm[sm_id]);
902         } while (! fc->stopped);
903 }
904
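/*
 * lcore entry point: forward packets with the engine selected in the
 * current forwarding configuration.
 */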
905 static int
906 start_pkt_forward_on_core(void *fwd_arg)
907 {
908         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
909                              cur_fwd_config.fwd_eng->packet_fwd);
910         return 0;
911 }
912
913 /*
914  * Run the TXONLY packet forwarding engine to send a single burst of packets.
915  * Used to start communication flows in network loopback test configurations.
916  */
917 static int
918 run_one_txonly_burst_on_core(void *fwd_arg)
919 {
920         struct fwd_lcore *fwd_lc;
921         struct fwd_lcore tmp_lcore;
922
923         fwd_lc = (struct fwd_lcore *) fwd_arg;
924         tmp_lcore = *fwd_lc;
925         tmp_lcore.stopped = 1;
926         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
927         return 0;
928 }
929
930 /*
931  * Launch packet forwarding:
932  *     - Setup per-port forwarding context.
933  *     - Launch logical cores with their forwarding configuration.
934  */
935 static void
936 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
937 {
938         port_fwd_begin_t port_fwd_begin;
939         unsigned int i;
940         unsigned int lc_id;
941         int diag;
942
943         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
944         if (port_fwd_begin != NULL) {
945                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
946                         (*port_fwd_begin)(fwd_ports_ids[i]);
947         }
948         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
949                 lc_id = fwd_lcores_cpuids[i];
950                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
951                         fwd_lcores[i]->stopped = 0;
952                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
953                                                      fwd_lcores[i], lc_id);
954                         if (diag != 0)
955                                 printf("launch lcore %u failed - diag=%d\n",
956                                        lc_id, diag);
957                 }
958         }
959 }
960
961 /*
962  * Launch packet forwarding configuration.
963  */
964 void
965 start_packet_forwarding(int with_tx_first)
966 {
967         port_fwd_begin_t port_fwd_begin;
968         port_fwd_end_t  port_fwd_end;
969         struct rte_port *port;
970         unsigned int i;
971         portid_t   pt_id;
972         streamid_t sm_id;
973
974         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
975                 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
976
977         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
978                 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
979
980         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
981                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
982                 (!nb_rxq || !nb_txq))
983                 rte_exit(EXIT_FAILURE,
984                         "Either rxq or txq is 0, cannot use %s fwd mode\n",
985                         cur_fwd_eng->fwd_mode_name);
986
987         if (all_ports_started() == 0) {
988                 printf("Not all ports were started\n");
989                 return;
990         }
991         if (test_done == 0) {
992                 printf("Packet forwarding already started\n");
993                 return;
994         }
995         if(dcb_test) {
996                 for (i = 0; i < nb_fwd_ports; i++) {
997                         pt_id = fwd_ports_ids[i];
998                         port = &ports[pt_id];
999                         if (!port->dcb_flag) {
1000                                 printf("In DCB mode, all forwarding ports must "
1001                                        "be configured in this mode.\n");
1002                                 return;
1003                         }
1004                 }
1005                 if (nb_fwd_lcores == 1) {
1006                         printf("In DCB mode, the number of forwarding cores "
1007                                "should be larger than 1.\n");
1008                         return;
1009                 }
1010         }
1011         test_done = 0;
1012
1013         if(!no_flush_rx)
1014                 flush_fwd_rx_queues();
1015
1016         fwd_config_setup();
1017         rxtx_config_display();
1018
1019         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1020                 pt_id = fwd_ports_ids[i];
1021                 port = &ports[pt_id];
1022                 rte_eth_stats_get(pt_id, &port->stats);
1023                 port->tx_dropped = 0;
1024
1025                 map_port_queue_stats_mapping_registers(pt_id, port);
1026         }
1027         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1028                 fwd_streams[sm_id]->rx_packets = 0;
1029                 fwd_streams[sm_id]->tx_packets = 0;
1030                 fwd_streams[sm_id]->fwd_dropped = 0;
1031                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1032                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1033
1034 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1035                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1036                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1037                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1038                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1039 #endif
1040 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1041                 fwd_streams[sm_id]->core_cycles = 0;
1042 #endif
1043         }
1044         if (with_tx_first) {
1045                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1046                 if (port_fwd_begin != NULL) {
1047                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1048                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1049                 }
1050                 launch_packet_forwarding(run_one_txonly_burst_on_core);
1051                 rte_eal_mp_wait_lcore();
1052                 port_fwd_end = tx_only_engine.port_fwd_end;
1053                 if (port_fwd_end != NULL) {
1054                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1055                                 (*port_fwd_end)(fwd_ports_ids[i]);
1056                 }
1057         }
1058         launch_packet_forwarding(start_pkt_forward_on_core);
1059 }
1060
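/*
 * Stop packet forwarding: wait for all logical cores to finish, then collect
 * and display the per-stream, per-port and accumulated statistics.
 */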
1061 void
1062 stop_packet_forwarding(void)
1063 {
1064         struct rte_eth_stats stats;
1065         struct rte_port *port;
1066         port_fwd_end_t  port_fwd_end;
1067         int i;
1068         portid_t   pt_id;
1069         streamid_t sm_id;
1070         lcoreid_t  lc_id;
1071         uint64_t total_recv;
1072         uint64_t total_xmit;
1073         uint64_t total_rx_dropped;
1074         uint64_t total_tx_dropped;
1075         uint64_t total_rx_nombuf;
1076         uint64_t tx_dropped;
1077         uint64_t rx_bad_ip_csum;
1078         uint64_t rx_bad_l4_csum;
1079 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1080         uint64_t fwd_cycles;
1081 #endif
1082         static const char *acc_stats_border = "+++++++++++++++";
1083
1084         if (all_ports_started() == 0) {
1085                 printf("Not all ports were started\n");
1086                 return;
1087         }
1088         if (test_done) {
1089                 printf("Packet forwarding not started\n");
1090                 return;
1091         }
1092         printf("Telling cores to stop...");
1093         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1094                 fwd_lcores[lc_id]->stopped = 1;
1095         printf("\nWaiting for lcores to finish...\n");
1096         rte_eal_mp_wait_lcore();
1097         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1098         if (port_fwd_end != NULL) {
1099                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1100                         pt_id = fwd_ports_ids[i];
1101                         (*port_fwd_end)(pt_id);
1102                 }
1103         }
1104 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1105         fwd_cycles = 0;
1106 #endif
1107         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1108                 if (cur_fwd_config.nb_fwd_streams >
1109                     cur_fwd_config.nb_fwd_ports) {
1110                         fwd_stream_stats_display(sm_id);
1111                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1112                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1113                 } else {
1114                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1115                                 fwd_streams[sm_id];
1116                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1117                                 fwd_streams[sm_id];
1118                 }
1119                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1120                 tx_dropped = (uint64_t) (tx_dropped +
1121                                          fwd_streams[sm_id]->fwd_dropped);
1122                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1123
1124                 rx_bad_ip_csum =
1125                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1126                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1127                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1128                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1129                                                         rx_bad_ip_csum;
1130
1131                 rx_bad_l4_csum =
1132                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1133                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1134                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1135                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1136                                                         rx_bad_l4_csum;
1137
1138 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1139                 fwd_cycles = (uint64_t) (fwd_cycles +
1140                                          fwd_streams[sm_id]->core_cycles);
1141 #endif
1142         }
1143         total_recv = 0;
1144         total_xmit = 0;
1145         total_rx_dropped = 0;
1146         total_tx_dropped = 0;
1147         total_rx_nombuf  = 0;
1148         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1149                 pt_id = fwd_ports_ids[i];
1150
1151                 port = &ports[pt_id];
1152                 rte_eth_stats_get(pt_id, &stats);
1153                 stats.ipackets -= port->stats.ipackets;
1154                 port->stats.ipackets = 0;
1155                 stats.opackets -= port->stats.opackets;
1156                 port->stats.opackets = 0;
1157                 stats.ibytes   -= port->stats.ibytes;
1158                 port->stats.ibytes = 0;
1159                 stats.obytes   -= port->stats.obytes;
1160                 port->stats.obytes = 0;
1161                 stats.imissed  -= port->stats.imissed;
1162                 port->stats.imissed = 0;
1163                 stats.oerrors  -= port->stats.oerrors;
1164                 port->stats.oerrors = 0;
1165                 stats.rx_nombuf -= port->stats.rx_nombuf;
1166                 port->stats.rx_nombuf = 0;
1167
1168                 total_recv += stats.ipackets;
1169                 total_xmit += stats.opackets;
1170                 total_rx_dropped += stats.imissed;
1171                 total_tx_dropped += port->tx_dropped;
1172                 total_rx_nombuf  += stats.rx_nombuf;
1173
1174                 fwd_port_stats_display(pt_id, &stats);
1175         }
1176         printf("\n  %s Accumulated forward statistics for all ports"
1177                "%s\n",
1178                acc_stats_border, acc_stats_border);
1179         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1180                "%-"PRIu64"\n"
1181                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1182                "%-"PRIu64"\n",
1183                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1184                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1185         if (total_rx_nombuf > 0)
1186                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1187         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1188                "%s\n",
1189                acc_stats_border, acc_stats_border);
1190 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1191         if (total_recv > 0)
1192                 printf("\n  CPU cycles/packet=%u (total cycles="
1193                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1194                        (unsigned int)(fwd_cycles / total_recv),
1195                        fwd_cycles, total_recv);
1196 #endif
1197         printf("\nDone.\n");
1198         test_done = 1;
1199 }
1200
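/*
 * Administratively bring the link of the given port up.
 */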
1201 void
1202 dev_set_link_up(portid_t pid)
1203 {
1204         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1205                 printf("\nSet link up failed.\n");
1206 }
1207
1208 void
1209 dev_set_link_down(portid_t pid)
1210 {
1211         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1212                 printf("\nSet link down failed.\n");
1213 }
1214
1215 static int
1216 all_ports_started(void)
1217 {
1218         portid_t pi;
1219         struct rte_port *port;
1220
1221         FOREACH_PORT(pi, ports) {
1222                 port = &ports[pi];
1223                 /* Check if there is a port which is not started */
1224                 if ((port->port_status != RTE_PORT_STARTED) &&
1225                         (port->slave_flag == 0))
1226                         return 0;
1227         }
1228
1229         /* All ports are started */
1230         return 1;
1231 }
1232
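/*
 * Return 1 if all ports (slave ports excluded) are stopped, 0 otherwise.
 */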
1233 int
1234 all_ports_stopped(void)
1235 {
1236         portid_t pi;
1237         struct rte_port *port;
1238
1239         FOREACH_PORT(pi, ports) {
1240                 port = &ports[pi];
1241                 if ((port->port_status != RTE_PORT_STOPPED) &&
1242                         (port->slave_flag == 0))
1243                         return 0;
1244         }
1245
1246         return 1;
1247 }
1248
1249 int
1250 port_is_started(portid_t port_id)
1251 {
1252         if (port_id_is_invalid(port_id, ENABLED_WARN))
1253                 return 0;
1254
1255         if (ports[port_id].port_status != RTE_PORT_STARTED)
1256                 return 0;
1257
1258         return 1;
1259 }
1260
1261 static int
1262 port_is_closed(portid_t port_id)
1263 {
1264         if (port_id_is_invalid(port_id, ENABLED_WARN))
1265                 return 0;
1266
1267         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1268                 return 0;
1269
1270         return 1;
1271 }
1272
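/*
 * Configure, if needed, and start the given port, or all ports when
 * RTE_PORT_ALL is passed. Return -1 if (re)configuration fails, 0 otherwise.
 */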
1273 int
1274 start_port(portid_t pid)
1275 {
1276         int diag, need_check_link_status = -1;
1277         portid_t pi;
1278         queueid_t qi;
1279         struct rte_port *port;
1280         struct ether_addr mac_addr;
1281
1282         if (test_done == 0) {
1283                 printf("Please stop forwarding first\n");
1284                 return -1;
1285         }
1286
1287         if (port_id_is_invalid(pid, ENABLED_WARN))
1288                 return 0;
1289
1290         if (init_fwd_streams() < 0) {
1291                 printf("Fail from init_fwd_streams()\n");
1292                 return -1;
1293         }
1294
1295         if(dcb_config)
1296                 dcb_test = 1;
1297         FOREACH_PORT(pi, ports) {
1298                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1299                         continue;
1300
1301                 need_check_link_status = 0;
1302                 port = &ports[pi];
1303                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1304                                                  RTE_PORT_HANDLING) == 0) {
1305                         printf("Port %d is now not stopped\n", pi);
1306                         continue;
1307                 }
1308
1309                 if (port->need_reconfig > 0) {
1310                         port->need_reconfig = 0;
1311
1312                         printf("Configuring Port %d (socket %u)\n", pi,
1313                                         port->socket_id);
1314                         /* configure port */
1315                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1316                                                 &(port->dev_conf));
1317                         if (diag != 0) {
1318                                 if (rte_atomic16_cmpset(&(port->port_status),
1319                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1320                                         printf("Port %d can not be set back "
1321                                                         "to stopped\n", pi);
1322                                 printf("Fail to configure port %d\n", pi);
1323                                 /* try to reconfigure port next time */
1324                                 port->need_reconfig = 1;
1325                                 return -1;
1326                         }
1327                 }
1328                 if (port->need_reconfig_queues > 0) {
1329                         port->need_reconfig_queues = 0;
1330                         /* setup tx queues */
1331                         for (qi = 0; qi < nb_txq; qi++) {
1332                                 if ((numa_support) &&
1333                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1334                                         diag = rte_eth_tx_queue_setup(pi, qi,
1335                                                 nb_txd,txring_numa[pi],
1336                                                 &(port->tx_conf));
1337                                 else
1338                                         diag = rte_eth_tx_queue_setup(pi, qi,
1339                                                 nb_txd,port->socket_id,
1340                                                 &(port->tx_conf));
1341
1342                                 if (diag == 0)
1343                                         continue;
1344
1345                                 /* Fail to setup tx queue, return */
1346                                 if (rte_atomic16_cmpset(&(port->port_status),
1347                                                         RTE_PORT_HANDLING,
1348                                                         RTE_PORT_STOPPED) == 0)
1349                                         printf("Port %d can not be set back "
1350                                                         "to stopped\n", pi);
1351                                 printf("Fail to configure port %d tx queues\n", pi);
1352                                 /* try to reconfigure queues next time */
1353                                 port->need_reconfig_queues = 1;
1354                                 return -1;
1355                         }
1356                         /* setup rx queues */
1357                         for (qi = 0; qi < nb_rxq; qi++) {
1358                                 if ((numa_support) &&
1359                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1360                                         struct rte_mempool * mp =
1361                                                 mbuf_pool_find(rxring_numa[pi]);
1362                                         if (mp == NULL) {
1363                                                 printf("Failed to setup RX queue: "
1364                                                         "No mempool allocation "
1365                                                         "on the socket %d\n",
1366                                                         rxring_numa[pi]);
1367                                                 return -1;
1368                                         }
1369
1370                                         diag = rte_eth_rx_queue_setup(pi, qi,
1371                                              nb_rxd,rxring_numa[pi],
1372                                              &(port->rx_conf),mp);
1373                                 }
1374                                 else
1375                                         diag = rte_eth_rx_queue_setup(pi, qi,
1376                                              nb_rxd,port->socket_id,
1377                                              &(port->rx_conf),
1378                                              mbuf_pool_find(port->socket_id));
1379
1380                                 if (diag == 0)
1381                                         continue;
1382
1383
1384                                 /* Fail to setup rx queue, return */
1385                                 if (rte_atomic16_cmpset(&(port->port_status),
1386                                                         RTE_PORT_HANDLING,
1387                                                         RTE_PORT_STOPPED) == 0)
1388                                         printf("Port %d can not be set back "
1389                                                         "to stopped\n", pi);
1390                                 printf("Fail to configure port %d rx queues\n", pi);
1391                                 /* try to reconfigure queues next time */
1392                                 port->need_reconfig_queues = 1;
1393                                 return -1;
1394                         }
1395                 }
1396                 /* start port */
1397                 if (rte_eth_dev_start(pi) < 0) {
1398                         printf("Fail to start port %d\n", pi);
1399
1400                         /* Failed to start port; set it back to stopped */
1401                         if (rte_atomic16_cmpset(&(port->port_status),
1402                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1403                                 printf("Port %d cannot be set back to "
1404                                                         "stopped\n", pi);
1405                         continue;
1406                 }
1407
1408                 if (rte_atomic16_cmpset(&(port->port_status),
1409                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1410                         printf("Port %d cannot be set to started\n", pi);
1411
1412                 rte_eth_macaddr_get(pi, &mac_addr);
1413                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1414                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1415                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1416                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1417
1418                 /* at least one port started; need to check link status */
1419                 need_check_link_status = 1;
1420         }
1421
1422         if (need_check_link_status == 1 && !no_link_check)
1423                 check_all_ports_link_status(RTE_PORT_ALL);
1424         else if (need_check_link_status == 0)
1425                 printf("Please stop the ports first\n");
1426
1427         printf("Done\n");
1428         return 0;
1429 }
1430
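     /*
      * Stop one port, or every port when pid == RTE_PORT_ALL.
      * Packet forwarding must already be stopped. Each port in the STARTED
      * state is moved to HANDLING, stopped with rte_eth_dev_stop() and then
      * marked STOPPED; link status is re-checked unless no_link_check is set.
      */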
1431 void
1432 stop_port(portid_t pid)
1433 {
1434         portid_t pi;
1435         struct rte_port *port;
1436         int need_check_link_status = 0;
1437
1438         if (test_done == 0) {
1439                 printf("Please stop forwarding first\n");
1440                 return;
1441         }
1442         if (dcb_test) {
1443                 dcb_test = 0;
1444                 dcb_config = 0;
1445         }
1446
1447         if (port_id_is_invalid(pid, ENABLED_WARN))
1448                 return;
1449
1450         printf("Stopping ports...\n");
1451
1452         FOREACH_PORT(pi, ports) {
1453                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1454                         continue;
1455
1456                 port = &ports[pi];
1457                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1458                                                 RTE_PORT_HANDLING) == 0)
1459                         continue;
1460
1461                 rte_eth_dev_stop(pi);
1462
1463                 if (rte_atomic16_cmpset(&(port->port_status),
1464                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1465                         printf("Port %d cannot be set to stopped\n", pi);
1466                 need_check_link_status = 1;
1467         }
1468         if (need_check_link_status && !no_link_check)
1469                 check_all_ports_link_status(RTE_PORT_ALL);
1470
1471         printf("Done\n");
1472 }
1473
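     /*
      * Close one port, or every port when pid == RTE_PORT_ALL.
      * A port must be in the STOPPED state before it can be closed with
      * rte_eth_dev_close(); already-closed ports are skipped.
      */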
1474 void
1475 close_port(portid_t pid)
1476 {
1477         portid_t pi;
1478         struct rte_port *port;
1479
1480         if (test_done == 0) {
1481                 printf("Please stop forwarding first\n");
1482                 return;
1483         }
1484
1485         if (port_id_is_invalid(pid, ENABLED_WARN))
1486                 return;
1487
1488         printf("Closing ports...\n");
1489
1490         FOREACH_PORT(pi, ports) {
1491                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1492                         continue;
1493
1494                 port = &ports[pi];
1495                 if (rte_atomic16_cmpset(&(port->port_status),
1496                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1497                         printf("Port %d is already closed\n", pi);
1498                         continue;
1499                 }
1500
1501                 if (rte_atomic16_cmpset(&(port->port_status),
1502                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1503                         printf("Port %d is not stopped\n", pi);
1504                         continue;
1505                 }
1506
1507                 rte_eth_dev_close(pi);
1508
1509                 if (rte_atomic16_cmpset(&(port->port_status),
1510                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1511                         printf("Port %d cannot be set to closed\n", pi);
1512         }
1513
1514         printf("Done\n");
1515 }
1516
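     /*
      * Hot-plug a new port described by "identifier" (a device identifier
      * accepted by rte_eth_dev_attach(), e.g. a PCI address such as
      * "0000:02:00.0" or a virtual device name). On success the port is
      * reconfigured for its NUMA socket, put in promiscuous mode and left
      * in the STOPPED state.
      */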
1517 void
1518 attach_port(char *identifier)
1519 {
1520         portid_t pi = 0;
1521
1522         printf("Attaching a new port...\n");
1523
1524         if (identifier == NULL) {
1525                 printf("Invalid parameters specified\n");
1526                 return;
1527         }
1528
1529         if (rte_eth_dev_attach(identifier, &pi))
1530                 return;
1531
1532         ports[pi].enabled = 1;
1533         reconfig(pi, rte_eth_dev_socket_id(pi));
1534         rte_eth_promiscuous_enable(pi);
1535
1536         nb_ports = rte_eth_dev_count();
1537
1538         ports[pi].port_status = RTE_PORT_STOPPED;
1539
1540         printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1541         printf("Done\n");
1542 }
1543
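     /*
      * Hot-unplug the given port with rte_eth_dev_detach(). The port must
      * have been closed first; its slot is disabled and the global port
      * count is refreshed.
      */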
1544 void
1545 detach_port(uint8_t port_id)
1546 {
1547         char name[RTE_ETH_NAME_MAX_LEN];
1548
1549         printf("Detaching a port...\n");
1550
1551         if (!port_is_closed(port_id)) {
1552                 printf("Please close port first\n");
1553                 return;
1554         }
1555
1556         if (rte_eth_dev_detach(port_id, name))
1557                 return;
1558
1559         ports[port_id].enabled = 0;
1560         nb_ports = rte_eth_dev_count();
1561
1562         printf("Port '%s' is detached. Total number of ports is now %d\n",
1563                         name, nb_ports);
1564         printf("Done\n");
1565         return;
1566 }
1567
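     /*
      * Shut down testpmd: stop forwarding if it is still running, then stop
      * and close every port. Link checking is disabled first so the shutdown
      * does not wait for links to come up.
      */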
1568 void
1569 pmd_test_exit(void)
1570 {
1571         portid_t pt_id;
1572
1573         if (test_done == 0)
1574                 stop_packet_forwarding();
1575
1576         if (ports != NULL) {
1577                 no_link_check = 1;
1578                 FOREACH_PORT(pt_id, ports) {
1579                         printf("\nShutting down port %d...\n", pt_id);
1580                         fflush(stdout);
1581                         stop_port(pt_id);
1582                         close_port(pt_id);
1583                 }
1584         }
1585         printf("\nBye...\n");
1586 }
1587
1588 typedef void (*cmd_func_t)(void);
1589 struct pmd_test_command {
1590         const char *cmd_name;
1591         cmd_func_t cmd_func;
1592 };
1593
1594 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1595
1596 /* Check the link status of all ports for up to 9 s, and print the final result */
1597 static void
1598 check_all_ports_link_status(uint32_t port_mask)
1599 {
1600 #define CHECK_INTERVAL 100 /* 100ms */
1601 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1602         uint8_t portid, count, all_ports_up, print_flag = 0;
1603         struct rte_eth_link link;
1604
1605         printf("Checking link statuses...\n");
1606         fflush(stdout);
1607         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1608                 all_ports_up = 1;
1609                 FOREACH_PORT(portid, ports) {
1610                         if ((port_mask & (1 << portid)) == 0)
1611                                 continue;
1612                         memset(&link, 0, sizeof(link));
1613                         rte_eth_link_get_nowait(portid, &link);
1614                         /* print link status if flag set */
1615                         if (print_flag == 1) {
1616                                 if (link.link_status)
1617                                         printf("Port %d Link Up - speed %u "
1618                                                 "Mbps - %s\n", (uint8_t)portid,
1619                                                 (unsigned)link.link_speed,
1620                                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1621                                                 ("full-duplex") : ("half-duplex"));
1622                                 else
1623                                         printf("Port %d Link Down\n",
1624                                                 (uint8_t)portid);
1625                                 continue;
1626                         }
1627                         /* clear all_ports_up flag if any link down */
1628                         if (link.link_status == ETH_LINK_DOWN) {
1629                                 all_ports_up = 0;
1630                                 break;
1631                         }
1632                 }
1633                 /* after finally printing all link status, get out */
1634                 if (print_flag == 1)
1635                         break;
1636
1637                 if (all_ports_up == 0) {
1638                         fflush(stdout);
1639                         rte_delay_ms(CHECK_INTERVAL);
1640                 }
1641
1642                 /* set the print_flag if all ports are up or on timeout */
1643                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1644                         print_flag = 1;
1645                 }
1646         }
1647 }
1648
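     /*
      * Apply the configured TX queue-to-stats-counter mappings to this port.
      * Returns 0 on success or the error code from
      * rte_eth_dev_set_tx_queue_stats_mapping().
      */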
1649 static int
1650 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1651 {
1652         uint16_t i;
1653         int diag;
1654         uint8_t mapping_found = 0;
1655
1656         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1657                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1658                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1659                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1660                                         tx_queue_stats_mappings[i].queue_id,
1661                                         tx_queue_stats_mappings[i].stats_counter_id);
1662                         if (diag != 0)
1663                                 return diag;
1664                         mapping_found = 1;
1665                 }
1666         }
1667         if (mapping_found)
1668                 port->tx_queue_stats_mapping_enabled = 1;
1669         return 0;
1670 }
1671
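     /* RX counterpart of set_tx_queue_stats_mapping_registers(). */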
1672 static int
1673 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1674 {
1675         uint16_t i;
1676         int diag;
1677         uint8_t mapping_found = 0;
1678
1679         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1680                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1681                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1682                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1683                                         rx_queue_stats_mappings[i].queue_id,
1684                                         rx_queue_stats_mappings[i].stats_counter_id);
1685                         if (diag != 0)
1686                                 return diag;
1687                         mapping_found = 1;
1688                 }
1689         }
1690         if (mapping_found)
1691                 port->rx_queue_stats_mapping_enabled = 1;
1692         return 0;
1693 }
1694
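     /*
      * Program the TX and RX queue stats mappings for a port. -ENOTSUP only
      * disables the feature for that port; any other error is fatal.
      */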
1695 static void
1696 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1697 {
1698         int diag = 0;
1699
1700         diag = set_tx_queue_stats_mapping_registers(pi, port);
1701         if (diag != 0) {
1702                 if (diag == -ENOTSUP) {
1703                         port->tx_queue_stats_mapping_enabled = 0;
1704                         printf("TX queue stats mapping not supported on port id=%d\n", pi);
1705                 }
1706                 else
1707                         rte_exit(EXIT_FAILURE,
1708                                         "set_tx_queue_stats_mapping_registers "
1709                                         "failed for port id=%d diag=%d\n",
1710                                         pi, diag);
1711         }
1712
1713         diag = set_rx_queue_stats_mapping_registers(pi, port);
1714         if (diag != 0) {
1715                 if (diag == -ENOTSUP) {
1716                         port->rx_queue_stats_mapping_enabled = 0;
1717                         printf("RX queue stats mapping not supported on port id=%d\n", pi);
1718                 }
1719                 else
1720                         rte_exit(EXIT_FAILURE,
1721                                         "set_rx_queue_stats_mapping_registers "
1722                                         "failed for port id=%d diag=%d\n",
1723                                         pi, diag);
1724         }
1725 }
1726
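     /*
      * Start from the driver's default RX/TX queue configuration and
      * override only the threshold and flag values explicitly given on the
      * command line (RTE_PMD_PARAM_UNSET means "keep the default").
      */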
1727 static void
1728 rxtx_port_config(struct rte_port *port)
1729 {
1730         port->rx_conf = port->dev_info.default_rxconf;
1731         port->tx_conf = port->dev_info.default_txconf;
1732
1733         /* Check if any RX/TX parameters have been passed */
1734         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1735                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1736
1737         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1738                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1739
1740         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1741                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1742
1743         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1744                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1745
1746         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1747                 port->rx_conf.rx_drop_en = rx_drop_en;
1748
1749         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1750                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1751
1752         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1753                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1754
1755         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1756                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1757
1758         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1759                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1760
1761         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1762                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1763
1764         if (txq_flags != RTE_PMD_PARAM_UNSET)
1765                 port->tx_conf.txq_flags = txq_flags;
1766 }
1767
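     /*
      * Build the default device configuration for every enabled port:
      * RSS when more than one RX queue is requested, VMDq+RSS when the
      * device exposes VFs, plain single-queue mode otherwise; then apply
      * the RX/TX queue parameters and the queue stats mappings.
      */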
1768 void
1769 init_port_config(void)
1770 {
1771         portid_t pid;
1772         struct rte_port *port;
1773
1774         FOREACH_PORT(pid, ports) {
1775                 port = &ports[pid];
1776                 port->dev_conf.rxmode = rx_mode;
1777                 port->dev_conf.fdir_conf = fdir_conf;
1778                 if (nb_rxq > 1) {
1779                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1780                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1781                 } else {
1782                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1783                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1784                 }
1785
1786                 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1787                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1788                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1789                         else
1790                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1791                 }
1792
1793                 if (port->dev_info.max_vfs != 0) {
1794                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1795                                 port->dev_conf.rxmode.mq_mode =
1796                                         ETH_MQ_RX_VMDQ_RSS;
1797                         else
1798                                 port->dev_conf.rxmode.mq_mode =
1799                                         ETH_MQ_RX_NONE;
1800
1801                         port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1802                 }
1803
1804                 rxtx_port_config(port);
1805
1806                 rte_eth_macaddr_get(pid, &port->eth_addr);
1807
1808                 map_port_queue_stats_mapping_registers(pid, port);
1809 #ifdef RTE_NIC_BYPASS
1810                 rte_eth_dev_bypass_init(pid);
1811 #endif
1812         }
1813 }
1814
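     /*
      * set_port_slave_flag() and clear_port_slave_flag() mark or unmark a
      * port as a slave (e.g. of a bonded device).
      */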
1815 void set_port_slave_flag(portid_t slave_pid)
1816 {
1817         struct rte_port *port;
1818
1819         port = &ports[slave_pid];
1820         port->slave_flag = 1;
1821 }
1822
1823 void clear_port_slave_flag(portid_t slave_pid)
1824 {
1825         struct rte_port *port;
1826
1827         port = &ports[slave_pid];
1828         port->slave_flag = 0;
1829 }
1830
1831 const uint16_t vlan_tags[] = {
1832                 0,  1,  2,  3,  4,  5,  6,  7,
1833                 8,  9, 10, 11,  12, 13, 14, 15,
1834                 16, 17, 18, 19, 20, 21, 22, 23,
1835                 24, 25, 26, 27, 28, 29, 30, 31
1836 };
1837
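     /*
      * Fill eth_conf with a DCB configuration for the requested number of
      * traffic classes: VMDq+DCB when dcb_mode is DCB_VT_ENABLED, plain DCB
      * (with RSS on RX) otherwise; PFC is advertised when pfc_en is set.
      */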
1838 static int
1839 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1840                  enum dcb_mode_enable dcb_mode,
1841                  enum rte_eth_nb_tcs num_tcs,
1842                  uint8_t pfc_en)
1843 {
1844         uint8_t i;
1845
1846         /*
1847          * Builds up the correct configuration for DCB+VT based on the VLAN tags array
1848          * given above, and the number of traffic classes available for use.
1849          */
1850         if (dcb_mode == DCB_VT_ENABLED) {
1851                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1852                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
1853                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1854                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1855
1856                 /* VMDQ+DCB RX and TX configurations */
1857                 vmdq_rx_conf->enable_default_pool = 0;
1858                 vmdq_rx_conf->default_pool = 0;
1859                 vmdq_rx_conf->nb_queue_pools =
1860                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1861                 vmdq_tx_conf->nb_queue_pools =
1862                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1863
1864                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1865                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1866                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1867                         vmdq_rx_conf->pool_map[i].pools =
1868                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
1869                 }
1870                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1871                         vmdq_rx_conf->dcb_tc[i] = i;
1872                         vmdq_tx_conf->dcb_tc[i] = i;
1873                 }
1874
1875                 /* set DCB mode of RX and TX of multiple queues */
1876                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1877                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1878         } else {
1879                 struct rte_eth_dcb_rx_conf *rx_conf =
1880                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
1881                 struct rte_eth_dcb_tx_conf *tx_conf =
1882                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
1883
1884                 rx_conf->nb_tcs = num_tcs;
1885                 tx_conf->nb_tcs = num_tcs;
1886
1887                 for (i = 0; i < num_tcs; i++) {
1888                         rx_conf->dcb_tc[i] = i;
1889                         tx_conf->dcb_tc[i] = i;
1890                 }
1891                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1892                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1893                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1894         }
1895
1896         if (pfc_en)
1897                 eth_conf->dcb_capability_en =
1898                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1899         else
1900                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1901
1902         return 0;
1903 }
1904
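     /*
      * Reconfigure a port for DCB testing: derive the number of RX/TX queues
      * from the device capabilities (or from the number of traffic classes),
      * build the DCB device configuration and enable VLAN filtering for the
      * test VLAN tags.
      */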
1905 int
1906 init_port_dcb_config(portid_t pid,
1907                      enum dcb_mode_enable dcb_mode,
1908                      enum rte_eth_nb_tcs num_tcs,
1909                      uint8_t pfc_en)
1910 {
1911         struct rte_eth_conf port_conf;
1912         struct rte_eth_dev_info dev_info;
1913         struct rte_port *rte_port;
1914         int retval;
1915         uint16_t i;
1916
1917         rte_eth_dev_info_get(pid, &dev_info);
1918
1919         /* If dev_info.vmdq_pool_base is greater than 0,
1920          * the queue IDs of the VMDq pools start after the PF queues.
1921          */
1922         if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
1923                 printf("VMDQ_DCB multi-queue mode is nonsensical"
1924                         " for port %d.\n", pid);
1925                 return -1;
1926         }
1927
1928         /* Assume the ports in testpmd have the same DCB capability
1929          * and the same number of rxq and txq in DCB mode.
1930          */
1931         if (dcb_mode == DCB_VT_ENABLED) {
1932                 nb_rxq = dev_info.max_rx_queues;
1933                 nb_txq = dev_info.max_tx_queues;
1934         } else {
1935                 /* If VT is disabled, use all PF queues */
1936                 if (dev_info.vmdq_pool_base == 0) {
1937                         nb_rxq = dev_info.max_rx_queues;
1938                         nb_txq = dev_info.max_tx_queues;
1939                 } else {
1940                         nb_rxq = (queueid_t)num_tcs;
1941                         nb_txq = (queueid_t)num_tcs;
1942
1943                 }
1944         }
1945         rx_free_thresh = 64;
1946
1947         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1948         /* Enter DCB configuration status */
1949         dcb_config = 1;
1950
1951         /* Set the DCB configuration (VT or non-VT mode) */
1952         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
1953         if (retval < 0)
1954                 return retval;
1955
1956         rte_port = &ports[pid];
1957         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1958
1959         rxtx_port_config(rte_port);
1960         /* VLAN filter */
1961         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1962         for (i = 0; i < RTE_DIM(vlan_tags); i++)
1963                 rx_vft_set(pid, vlan_tags[i], 1);
1964
1965         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1966         map_port_queue_stats_mapping_registers(pid, rte_port);
1967
1968         rte_port->dcb_flag = 1;
1969
1970         return 0;
1971 }
1972
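     /* Allocate the global port descriptor array and enable the probed ports. */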
1973 static void
1974 init_port(void)
1975 {
1976         portid_t pid;
1977
1978         /* Configuration of Ethernet ports. */
1979         ports = rte_zmalloc("testpmd: ports",
1980                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
1981                             RTE_CACHE_LINE_SIZE);
1982         if (ports == NULL) {
1983                 rte_exit(EXIT_FAILURE,
1984                                 "rte_zmalloc(%d struct rte_port) failed\n",
1985                                 RTE_MAX_ETHPORTS);
1986         }
1987
1988         /* Enable the allocated ports */
1989         for (pid = 0; pid < nb_ports; pid++)
1990                 ports[pid].enabled = 1;
1991 }
1992
1993 static void
1994 force_quit(void)
1995 {
1996         pmd_test_exit();
1997         prompt_exit();
1998 }
1999
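     /*
      * On SIGINT/SIGTERM: shut down the ports and the prompt, then re-raise
      * the signal with the default handler so the process exits with the
      * status conventionally expected for that signal.
      */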
2000 static void
2001 signal_handler(int signum)
2002 {
2003         if (signum == SIGINT || signum == SIGTERM) {
2004                 printf("\nSignal %d received, preparing to exit...\n",
2005                                 signum);
2006                 force_quit();
2007                 /* exit with the expected status */
2008                 signal(signum, SIG_DFL);
2009                 kill(getpid(), signum);
2010         }
2011 }
2012
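     /*
      * Entry point: initialize the EAL, allocate the port table, parse the
      * testpmd-specific arguments, configure and start all ports in
      * promiscuous mode, then run the interactive prompt or start packet
      * forwarding directly.
      */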
2013 int
2014 main(int argc, char** argv)
2015 {
2016         int  diag;
2017         uint8_t port_id;
2018
2019         signal(SIGINT, signal_handler);
2020         signal(SIGTERM, signal_handler);
2021
2022         diag = rte_eal_init(argc, argv);
2023         if (diag < 0)
2024                 rte_panic("Cannot init EAL\n");
2025
2026         nb_ports = (portid_t) rte_eth_dev_count();
2027         if (nb_ports == 0)
2028                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2029
2030         /* allocate port structures, and init them */
2031         init_port();
2032
2033         set_def_fwd_config();
2034         if (nb_lcores == 0)
2035                 rte_panic("Empty set of forwarding logical cores - check the "
2036                           "core mask supplied in the command parameters\n");
2037
2038         argc -= diag;
2039         argv += diag;
2040         if (argc > 1)
2041                 launch_args_parse(argc, argv);
2042
2043         if (!nb_rxq && !nb_txq)
2044                 printf("Warning: Either rx or tx queues should be non-zero\n");
2045
2046         if (nb_rxq > 1 && nb_rxq > nb_txq)
2047                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2048                        "but nb_txq=%d will prevent it from being fully tested.\n",
2049                        nb_rxq, nb_txq);
2050
2051         init_config();
2052         if (start_port(RTE_PORT_ALL) != 0)
2053                 rte_exit(EXIT_FAILURE, "Failed to start ports\n");
2054
2055         /* set all ports to promiscuous mode by default */
2056         FOREACH_PORT(port_id, ports)
2057                 rte_eth_promiscuous_enable(port_id);
2058
2059 #ifdef RTE_LIBRTE_CMDLINE
2060         if (interactive == 1) {
2061                 if (auto_start) {
2062                         printf("Start automatic packet forwarding\n");
2063                         start_packet_forwarding(0);
2064                 }
2065                 prompt();
2066         } else
2067 #endif
2068         {
2069                 char c;
2070                 int rc;
2071
2072                 printf("No interactive command line; starting packet forwarding\n");
2073                 start_packet_forwarding(0);
2074                 printf("Press enter to exit\n");
2075                 rc = read(0, &c, 1);
2076                 pmd_test_exit();
2077                 if (rc < 0)
2078                         return 1;
2079         }
2080
2081         return 0;
2082 }