[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_eal.h>
61 #include <rte_per_lcore.h>
62 #include <rte_lcore.h>
63 #include <rte_atomic.h>
64 #include <rte_branch_prediction.h>
65 #include <rte_ring.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
68 #include <rte_mbuf.h>
69 #include <rte_interrupts.h>
70 #include <rte_pci.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
73 #include <rte_dev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81
82 uint16_t verbose_level = 0; /**< Silent by default. */
83
84 /* Use the master core for the command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
87
88 /*
89  * NUMA support configuration.
90  * When set, NUMA support attempts to dispatch the allocation of the
91  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92  * probed ports among CPU sockets 0 and 1.
93  * Otherwise, all memory is allocated from CPU socket 0.
94  */
95 uint8_t numa_support = 0; /**< No numa support by default */
96
97 /*
98  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99  * not configured.
100  */
101 uint8_t socket_num = UMA_NO_CONFIG;
102
103 /*
104  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
105  */
106 uint8_t mp_anon = 0;
107
108 /*
109  * Record the Ethernet address of peer target ports to which packets are
110  * forwarded.
111  * Must be instantiated with the Ethernet addresses of peer traffic generator
112  * ports.
113  */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
116
117 /*
118  * Probed Target Environment.
119  */
120 struct rte_port *ports;        /**< For all probed ethernet ports. */
121 portid_t nb_ports;             /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124
125 /*
126  * Test Forwarding Configuration.
127  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129  */
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
134
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
140
141 /*
142  * Forwarding engines.
143  */
144 struct fwd_engine * fwd_engines[] = {
145         &io_fwd_engine,
146         &mac_fwd_engine,
147         &mac_retry_fwd_engine,
148         &mac_swap_engine,
149         &flow_gen_engine,
150         &rx_only_engine,
151         &tx_only_engine,
152         &csum_fwd_engine,
153         &icmp_echo_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155         &ieee1588_fwd_engine,
156 #endif
157         NULL,
158 };
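
/*
 * Illustrative sketch only (not part of this file): the forwarding engine
 * used at run time is selected by name, e.g. by the "set fwd <mode>"
 * command handled in config.c.  Assuming each struct fwd_engine carries a
 * fwd_mode_name string (see testpmd.h), a lookup over the NULL-terminated
 * table above could look like:
 *
 *	static struct fwd_engine *
 *	find_fwd_engine(const char *name)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; fwd_engines[i] != NULL; i++)
 *			if (strcmp(fwd_engines[i]->fwd_mode_name, name) == 0)
 *				return fwd_engines[i];
 *		return NULL;
 *	}
 */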
159
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
162
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
165                                       * specified on command-line. */
166
167 /*
168  * Configuration of packet segments used by the "txonly" processing engine.
169  */
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172         TXONLY_DEF_PACKET_LEN,
173 };
174 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
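
/*
 * Illustrative values only (hypothetical, not the defaults above): a
 * segmented TXONLY packet is described by the per-segment lengths plus the
 * segment count, and the "set txpkts" run-time command is expected to fill
 * these fields in the same way, e.g. for a packet split into three
 * 64-byte segments:
 *
 *	tx_pkt_seg_lengths[0] = 64;
 *	tx_pkt_seg_lengths[1] = 64;
 *	tx_pkt_seg_lengths[2] = 64;
 *	tx_pkt_nb_segs = 3;
 *	tx_pkt_length  = 3 * 64;
 */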
175
176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
177 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
178
179 /* Whether the current configuration is in DCB mode (0 means it is not) */
180 uint8_t dcb_config = 0;
181
182 /* Whether DCB is in testing status */
183 uint8_t dcb_test = 0;
184
185 /* DCB-on with VT-on queue mapping is the default */
186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
187
188 /*
189  * Configurable number of RX/TX queues.
190  */
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193
194 /*
195  * Configurable number of RX/TX ring descriptors.
196  */
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
201
202 #define RTE_PMD_PARAM_UNSET -1
203 /*
204  * Configurable values of RX and TX ring threshold registers.
205  */
206
207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
210
211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
214
215 /*
216  * Configurable value of RX free threshold.
217  */
218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
219
220 /*
221  * Configurable value of RX drop enable.
222  */
223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
224
225 /*
226  * Configurable value of TX free threshold.
227  */
228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
229
230 /*
231  * Configurable value of TX RS bit threshold.
232  */
233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
234
235 /*
236  * Configurable value of TX queue flags.
237  */
238 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
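
/*
 * Minimal sketch (an assumption about how the RTE_PMD_PARAM_UNSET sentinel
 * is consumed when the per-port configuration is built later in this file):
 * a value left at RTE_PMD_PARAM_UNSET keeps the PMD/driver default, while
 * anything else overrides it, roughly:
 *
 *	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
 *		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
 *	if (txq_flags != RTE_PMD_PARAM_UNSET)
 *		port->tx_conf.txq_flags = txq_flags;
 */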
239
240 /*
241  * Receive Side Scaling (RSS) configuration.
242  */
243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
244
245 /*
246  * Port topology configuration
247  */
248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
249
250 /*
251  * Avoids flushing all the RX streams before forwarding starts.
252  */
253 uint8_t no_flush_rx = 0; /* flush by default */
254
255 /*
256  * Avoids checking the link status when starting/stopping a port.
257  */
258 uint8_t no_link_check = 0; /* check by default */
259
260 /*
261  * NIC bypass mode configuration options.
262  */
263 #ifdef RTE_NIC_BYPASS
264
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
267
268 #endif
269
270 /*
271  * Ethernet device configuration.
272  */
273 struct rte_eth_rxmode rx_mode = {
274         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
275         .split_hdr_size = 0,
276         .header_split   = 0, /**< Header Split disabled. */
277         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
280         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
282         .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
283 };
284
285 struct rte_fdir_conf fdir_conf = {
286         .mode = RTE_FDIR_MODE_NONE,
287         .pballoc = RTE_FDIR_PBALLOC_64K,
288         .status = RTE_FDIR_REPORT_STATUS,
289         .mask = {
290                 .vlan_tci_mask = 0x0,
291                 .ipv4_mask     = {
292                         .src_ip = 0xFFFFFFFF,
293                         .dst_ip = 0xFFFFFFFF,
294                 },
295                 .ipv6_mask     = {
296                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
297                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
298                 },
299                 .src_port_mask = 0xFFFF,
300                 .dst_port_mask = 0xFFFF,
301         },
302         .drop_queue = 127,
303 };
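
/*
 * Sketch (an assumption about where these templates are consumed): the
 * rx_mode and fdir_conf structures above serve as the template for every
 * probed port and are expected to be copied into each port's device
 * configuration by init_port_config() further down in this file, roughly:
 *
 *	port->dev_conf.rxmode    = rx_mode;
 *	port->dev_conf.fdir_conf = fdir_conf;
 */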
304
305 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
306
307 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
308 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
309
310 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
311 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
312
313 uint16_t nb_tx_queue_stats_mappings = 0;
314 uint16_t nb_rx_queue_stats_mappings = 0;
315
316 /* Forward function declarations */
317 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
318 static void check_all_ports_link_status(uint32_t port_mask);
319
320 /*
321  * Check if all the ports are started.
322  * If yes, return a positive value; if not, return zero.
323  */
324 static int all_ports_started(void);
325
326 /*
327  * Find next enabled port
328  */
329 portid_t
330 find_next_port(portid_t p, struct rte_port *ports, int size)
331 {
332         if (ports == NULL)
333                 rte_exit(-EINVAL, "failed to find a next port id\n");
334
335         while ((p < size) && (ports[p].enabled == 0))
336                 p++;
337         return p;
338 }
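
/*
 * Sketch of the intended use of find_next_port(): the FOREACH_PORT()
 * iterator used throughout this file is assumed (see testpmd.h) to be
 * built on top of it, skipping detached/disabled ports, along the lines
 * of:
 *
 *	#define FOREACH_PORT(p, ports) \
 *		for (p = find_next_port(0, ports, RTE_MAX_ETHPORTS); \
 *		     p < RTE_MAX_ETHPORTS; \
 *		     p = find_next_port(p + 1, ports, RTE_MAX_ETHPORTS))
 */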
339
340 /*
341  * Setup default configuration.
342  */
343 static void
344 set_default_fwd_lcores_config(void)
345 {
346         unsigned int i;
347         unsigned int nb_lc;
348
349         nb_lc = 0;
350         for (i = 0; i < RTE_MAX_LCORE; i++) {
351                 if (! rte_lcore_is_enabled(i))
352                         continue;
353                 if (i == rte_get_master_lcore())
354                         continue;
355                 fwd_lcores_cpuids[nb_lc++] = i;
356         }
357         nb_lcores = (lcoreid_t) nb_lc;
358         nb_cfg_lcores = nb_lcores;
359         nb_fwd_lcores = 1;
360 }
361
362 static void
363 set_def_peer_eth_addrs(void)
364 {
365         portid_t i;
366
367         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
368                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
369                 peer_eth_addrs[i].addr_bytes[5] = i;
370         }
371 }
372
373 static void
374 set_default_fwd_ports_config(void)
375 {
376         portid_t pt_id;
377
378         for (pt_id = 0; pt_id < nb_ports; pt_id++)
379                 fwd_ports_ids[pt_id] = pt_id;
380
381         nb_cfg_ports = nb_ports;
382         nb_fwd_ports = nb_ports;
383 }
384
385 void
386 set_def_fwd_config(void)
387 {
388         set_default_fwd_lcores_config();
389         set_def_peer_eth_addrs();
390         set_default_fwd_ports_config();
391 }
392
393 /*
394  * Configuration initialisation done once at init time.
395  */
396 static void
397 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
398                  unsigned int socket_id)
399 {
400         char pool_name[RTE_MEMPOOL_NAMESIZE];
401         struct rte_mempool *rte_mp;
402         uint32_t mb_size;
403
404         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
405         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
406
407 #ifdef RTE_LIBRTE_PMD_XENVIRT
408         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
409                 (unsigned) mb_mempool_cache,
410                 sizeof(struct rte_pktmbuf_pool_private),
411                 rte_pktmbuf_pool_init, NULL,
412                 rte_pktmbuf_init, NULL,
413                 socket_id, 0);
414
415
416
417 #else
418         if (mp_anon != 0)
419                 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
420                                     (unsigned) mb_mempool_cache,
421                                     sizeof(struct rte_pktmbuf_pool_private),
422                                     rte_pktmbuf_pool_init, NULL,
423                                     rte_pktmbuf_init, NULL,
424                                     socket_id, 0);
425         else
426                 /* wrapper to rte_mempool_create() */
427                 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
428                         mb_mempool_cache, 0, mbuf_seg_size, socket_id);
429
430 #endif
431
432         if (rte_mp == NULL) {
433                 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
434                                                 "failed\n", socket_id);
435         } else if (verbose_level > 0) {
436                 rte_mempool_dump(stdout, rte_mp);
437         }
438 }
439
440 /*
441  * Check whether the given socket id is valid in NUMA mode;
442  * if valid, return 0, else return -1.
443  */
444 static int
445 check_socket_id(const unsigned int socket_id)
446 {
447         static int warning_once = 0;
448
449         if (socket_id >= MAX_SOCKET) {
450                 if (!warning_once && numa_support)
451                         printf("Warning: NUMA should be configured manually by"
452                                " using --port-numa-config and"
453                                " --ring-numa-config parameters along with"
454                                " --numa.\n");
455                 warning_once = 1;
456                 return -1;
457         }
458         return 0;
459 }
460
461 static void
462 init_config(void)
463 {
464         portid_t pid;
465         struct rte_port *port;
466         struct rte_mempool *mbp;
467         unsigned int nb_mbuf_per_pool;
468         lcoreid_t  lc_id;
469         uint8_t port_per_socket[MAX_SOCKET];
470
471         memset(port_per_socket,0,MAX_SOCKET);
472         /* Configuration of logical cores. */
473         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
474                                 sizeof(struct fwd_lcore *) * nb_lcores,
475                                 RTE_CACHE_LINE_SIZE);
476         if (fwd_lcores == NULL) {
477                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
478                                                         "failed\n", nb_lcores);
479         }
480         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
481                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
482                                                sizeof(struct fwd_lcore),
483                                                RTE_CACHE_LINE_SIZE);
484                 if (fwd_lcores[lc_id] == NULL) {
485                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
486                                                                 "failed\n");
487                 }
488                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
489         }
490
491         /*
492          * Create mbuf pools.
493          * If NUMA support is disabled, create a single mbuf pool in
494          * socket 0 memory by default.
495          * Otherwise, create an mbuf pool in the memory of each of sockets 0 and 1.
496          *
497          * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
498          * nb_txd can be configured at run time.
499          */
500         if (param_total_num_mbufs)
501                 nb_mbuf_per_pool = param_total_num_mbufs;
502         else {
503                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
504                                 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
505
506                 if (!numa_support)
507                         nb_mbuf_per_pool =
508                                 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
509         }
510
511         if (!numa_support) {
512                 if (socket_num == UMA_NO_CONFIG)
513                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
514                 else
515                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
516                                                  socket_num);
517         }
518
519         FOREACH_PORT(pid, ports) {
520                 port = &ports[pid];
521                 rte_eth_dev_info_get(pid, &port->dev_info);
522
523                 if (numa_support) {
524                         if (port_numa[pid] != NUMA_NO_CONFIG)
525                                 port_per_socket[port_numa[pid]]++;
526                         else {
527                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
528
529                                 /* if socket_id is invalid, set to 0 */
530                                 if (check_socket_id(socket_id) < 0)
531                                         socket_id = 0;
532                                 port_per_socket[socket_id]++;
533                         }
534                 }
535
536                 /* set flag to initialize port/queue */
537                 port->need_reconfig = 1;
538                 port->need_reconfig_queues = 1;
539         }
540
541         if (numa_support) {
542                 uint8_t i;
543                 unsigned int nb_mbuf;
544
545                 if (param_total_num_mbufs)
546                         nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
547
548                 for (i = 0; i < MAX_SOCKET; i++) {
549                         nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
550                         if (nb_mbuf)
551                                 mbuf_pool_create(mbuf_data_size,
552                                                 nb_mbuf,i);
553                 }
554         }
555         init_port_config();
556
557         /*
558          * Record which mbuf pool each logical core should use, if needed.
559          */
560         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
561                 mbp = mbuf_pool_find(
562                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
563
564                 if (mbp == NULL)
565                         mbp = mbuf_pool_find(0);
566                 fwd_lcores[lc_id]->mbp = mbp;
567         }
568
569         /* Configuration of packet forwarding streams. */
570         if (init_fwd_streams() < 0)
571                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
572 }
573
574
575 void
576 reconfig(portid_t new_port_id, unsigned socket_id)
577 {
578         struct rte_port *port;
579
580         /* Reconfiguration of Ethernet ports. */
581         port = &ports[new_port_id];
582         rte_eth_dev_info_get(new_port_id, &port->dev_info);
583
584         /* set flag to initialize port/queue */
585         port->need_reconfig = 1;
586         port->need_reconfig_queues = 1;
587         port->socket_id = socket_id;
588
589         init_port_config();
590 }
591
592
593 int
594 init_fwd_streams(void)
595 {
596         portid_t pid;
597         struct rte_port *port;
598         streamid_t sm_id, nb_fwd_streams_new;
599
600         /* set the socket id according to whether NUMA is enabled */
601         FOREACH_PORT(pid, ports) {
602                 port = &ports[pid];
603                 if (nb_rxq > port->dev_info.max_rx_queues) {
604                         printf("Fail: nb_rxq(%d) is greater than "
605                                 "max_rx_queues(%d)\n", nb_rxq,
606                                 port->dev_info.max_rx_queues);
607                         return -1;
608                 }
609                 if (nb_txq > port->dev_info.max_tx_queues) {
610                         printf("Fail: nb_txq(%d) is greater than "
611                                 "max_tx_queues(%d)\n", nb_txq,
612                                 port->dev_info.max_tx_queues);
613                         return -1;
614                 }
615                 if (numa_support) {
616                         if (port_numa[pid] != NUMA_NO_CONFIG)
617                                 port->socket_id = port_numa[pid];
618                         else {
619                                 port->socket_id = rte_eth_dev_socket_id(pid);
620
621                                 /* if socket_id is invalid, set to 0 */
622                                 if (check_socket_id(port->socket_id) < 0)
623                                         port->socket_id = 0;
624                         }
625                 }
626                 else {
627                         if (socket_num == UMA_NO_CONFIG)
628                                 port->socket_id = 0;
629                         else
630                                 port->socket_id = socket_num;
631                 }
632         }
633
634         nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
635         if (nb_fwd_streams_new == nb_fwd_streams)
636                 return 0;
637         /* clear the old */
638         if (fwd_streams != NULL) {
639                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
640                         if (fwd_streams[sm_id] == NULL)
641                                 continue;
642                         rte_free(fwd_streams[sm_id]);
643                         fwd_streams[sm_id] = NULL;
644                 }
645                 rte_free(fwd_streams);
646                 fwd_streams = NULL;
647         }
648
649         /* init new */
650         nb_fwd_streams = nb_fwd_streams_new;
651         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
652                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
653         if (fwd_streams == NULL)
654                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
655                                                 "failed\n", nb_fwd_streams);
656
657         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
658                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
659                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
660                 if (fwd_streams[sm_id] == NULL)
661                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
662                                                                 " failed\n");
663         }
664
665         return 0;
666 }
667
668 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
669 static void
670 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
671 {
672         unsigned int total_burst;
673         unsigned int nb_burst;
674         unsigned int burst_stats[3];
675         uint16_t pktnb_stats[3];
676         uint16_t nb_pkt;
677         int burst_percent[3];
678
679         /*
680          * First compute the total number of packet bursts and the
681          * two highest numbers of bursts of the same number of packets.
682          */
683         total_burst = 0;
684         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
685         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
686         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
687                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
688                 if (nb_burst == 0)
689                         continue;
690                 total_burst += nb_burst;
691                 if (nb_burst > burst_stats[0]) {
692                         burst_stats[1] = burst_stats[0];
693                         pktnb_stats[1] = pktnb_stats[0];
694                         burst_stats[0] = nb_burst;
695                         pktnb_stats[0] = nb_pkt;
696                 }
697         }
698         if (total_burst == 0)
699                 return;
700         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
701         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
702                burst_percent[0], (int) pktnb_stats[0]);
703         if (burst_stats[0] == total_burst) {
704                 printf("]\n");
705                 return;
706         }
707         if (burst_stats[0] + burst_stats[1] == total_burst) {
708                 printf(" + %d%% of %d pkts]\n",
709                        100 - burst_percent[0], pktnb_stats[1]);
710                 return;
711         }
712         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
713         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
714         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
715                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
716                 return;
717         }
718         printf(" + %d%% of %d pkts + %d%% of others]\n",
719                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
720 }
721 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
722
723 static void
724 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
725 {
726         struct rte_port *port;
727         uint8_t i;
728
729         static const char *fwd_stats_border = "----------------------";
730
731         port = &ports[port_id];
732         printf("\n  %s Forward statistics for port %-2d %s\n",
733                fwd_stats_border, port_id, fwd_stats_border);
734
735         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
736                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
737                        "%-"PRIu64"\n",
738                        stats->ipackets, stats->imissed,
739                        (uint64_t) (stats->ipackets + stats->imissed));
740
741                 if (cur_fwd_eng == &csum_fwd_engine)
742                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
743                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
744                 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
745                         printf("  RX-badcrc:  %-14"PRIu64" RX-badlen:  %-14"PRIu64
746                                "RX-error: %-"PRIu64"\n",
747                                stats->ibadcrc, stats->ibadlen, stats->ierrors);
748                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
749                 }
750
751                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
752                        "%-"PRIu64"\n",
753                        stats->opackets, port->tx_dropped,
754                        (uint64_t) (stats->opackets + port->tx_dropped));
755         }
756         else {
757                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
758                        "%14"PRIu64"\n",
759                        stats->ipackets, stats->imissed,
760                        (uint64_t) (stats->ipackets + stats->imissed));
761
762                 if (cur_fwd_eng == &csum_fwd_engine)
763                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
764                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
765                 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
766                         printf("  RX-badcrc:              %14"PRIu64"    RX-badlen: %14"PRIu64
767                                "    RX-error:%"PRIu64"\n",
768                                stats->ibadcrc, stats->ibadlen, stats->ierrors);
769                         printf("  RX-nombufs:             %14"PRIu64"\n",
770                                stats->rx_nombuf);
771                 }
772
773                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
774                        "%14"PRIu64"\n",
775                        stats->opackets, port->tx_dropped,
776                        (uint64_t) (stats->opackets + port->tx_dropped));
777         }
778
779         /* Display statistics of XON/XOFF pause frames, if any. */
780         if ((stats->tx_pause_xon  | stats->rx_pause_xon |
781              stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
782                 printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
783                        stats->rx_pause_xoff, stats->rx_pause_xon);
784                 printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
785                        stats->tx_pause_xoff, stats->tx_pause_xon);
786         }
787
788 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
789         if (port->rx_stream)
790                 pkt_burst_stats_display("RX",
791                         &port->rx_stream->rx_burst_stats);
792         if (port->tx_stream)
793                 pkt_burst_stats_display("TX",
794                         &port->tx_stream->tx_burst_stats);
795 #endif
796         /* stats fdir */
797         if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
798                 printf("  Fdirmiss:%14"PRIu64"    Fdirmatch:%14"PRIu64"\n",
799                        stats->fdirmiss,
800                        stats->fdirmatch);
801
802         if (port->rx_queue_stats_mapping_enabled) {
803                 printf("\n");
804                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
805                         printf("  Stats reg %2d RX-packets:%14"PRIu64
806                                "     RX-errors:%14"PRIu64
807                                "    RX-bytes:%14"PRIu64"\n",
808                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
809                 }
810                 printf("\n");
811         }
812         if (port->tx_queue_stats_mapping_enabled) {
813                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
814                         printf("  Stats reg %2d TX-packets:%14"PRIu64
815                                "                                 TX-bytes:%14"PRIu64"\n",
816                                i, stats->q_opackets[i], stats->q_obytes[i]);
817                 }
818         }
819
820         printf("  %s--------------------------------%s\n",
821                fwd_stats_border, fwd_stats_border);
822 }
823
824 static void
825 fwd_stream_stats_display(streamid_t stream_id)
826 {
827         struct fwd_stream *fs;
828         static const char *fwd_top_stats_border = "-------";
829
830         fs = fwd_streams[stream_id];
831         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
832             (fs->fwd_dropped == 0))
833                 return;
834         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
835                "TX Port=%2d/Queue=%2d %s\n",
836                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
837                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
838         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
839                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
840
841         /* if checksum mode */
842         if (cur_fwd_eng == &csum_fwd_engine) {
843                printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
844                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
845         }
846
847 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
848         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
849         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
850 #endif
851 }
852
853 static void
854 flush_fwd_rx_queues(void)
855 {
856         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
857         portid_t  rxp;
858         portid_t port_id;
859         queueid_t rxq;
860         uint16_t  nb_rx;
861         uint16_t  i;
862         uint8_t   j;
863
864         for (j = 0; j < 2; j++) {
865                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
866                         for (rxq = 0; rxq < nb_rxq; rxq++) {
867                                 port_id = fwd_ports_ids[rxp];
868                                 do {
869                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
870                                                 pkts_burst, MAX_PKT_BURST);
871                                         for (i = 0; i < nb_rx; i++)
872                                                 rte_pktmbuf_free(pkts_burst[i]);
873                                 } while (nb_rx > 0);
874                         }
875                 }
876                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
877         }
878 }
879
880 static void
881 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
882 {
883         struct fwd_stream **fsm;
884         streamid_t nb_fs;
885         streamid_t sm_id;
886
887         fsm = &fwd_streams[fc->stream_idx];
888         nb_fs = fc->stream_nb;
889         do {
890                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
891                         (*pkt_fwd)(fsm[sm_id]);
892         } while (! fc->stopped);
893 }
894
895 static int
896 start_pkt_forward_on_core(void *fwd_arg)
897 {
898         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
899                              cur_fwd_config.fwd_eng->packet_fwd);
900         return 0;
901 }
902
903 /*
904  * Run the TXONLY packet forwarding engine to send a single burst of packets.
905  * Used to start communication flows in network loopback test configurations.
906  */
907 static int
908 run_one_txonly_burst_on_core(void *fwd_arg)
909 {
910         struct fwd_lcore *fwd_lc;
911         struct fwd_lcore tmp_lcore;
912
913         fwd_lc = (struct fwd_lcore *) fwd_arg;
914         tmp_lcore = *fwd_lc;
915         tmp_lcore.stopped = 1;
916         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
917         return 0;
918 }
919
920 /*
921  * Launch packet forwarding:
922  *     - Set up the per-port forwarding context.
923  *     - Launch logical cores with their forwarding configuration.
924  */
925 static void
926 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
927 {
928         port_fwd_begin_t port_fwd_begin;
929         unsigned int i;
930         unsigned int lc_id;
931         int diag;
932
933         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
934         if (port_fwd_begin != NULL) {
935                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
936                         (*port_fwd_begin)(fwd_ports_ids[i]);
937         }
938         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
939                 lc_id = fwd_lcores_cpuids[i];
940                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
941                         fwd_lcores[i]->stopped = 0;
942                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
943                                                      fwd_lcores[i], lc_id);
944                         if (diag != 0)
945                                 printf("launch lcore %u failed - diag=%d\n",
946                                        lc_id, diag);
947                 }
948         }
949 }
950
951 /*
952  * Launch packet forwarding configuration.
953  */
954 void
955 start_packet_forwarding(int with_tx_first)
956 {
957         port_fwd_begin_t port_fwd_begin;
958         port_fwd_end_t  port_fwd_end;
959         struct rte_port *port;
960         unsigned int i;
961         portid_t   pt_id;
962         streamid_t sm_id;
963
964         if (all_ports_started() == 0) {
965                 printf("Not all ports were started\n");
966                 return;
967         }
968         if (test_done == 0) {
969                 printf("Packet forwarding already started\n");
970                 return;
971         }
972         if(dcb_test) {
973                 for (i = 0; i < nb_fwd_ports; i++) {
974                         pt_id = fwd_ports_ids[i];
975                         port = &ports[pt_id];
976                         if (!port->dcb_flag) {
977                                 printf("In DCB mode, all forwarding ports must "
978                                        "be configured in this mode.\n");
979                                 return;
980                         }
981                 }
982                 if (nb_fwd_lcores == 1) {
983                         printf("In DCB mode, the number of forwarding cores "
984                                "should be larger than 1.\n");
985                         return;
986                 }
987         }
988         test_done = 0;
989
990         if(!no_flush_rx)
991                 flush_fwd_rx_queues();
992
993         fwd_config_setup();
994         rxtx_config_display();
995
996         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
997                 pt_id = fwd_ports_ids[i];
998                 port = &ports[pt_id];
999                 rte_eth_stats_get(pt_id, &port->stats);
1000                 port->tx_dropped = 0;
1001
1002                 map_port_queue_stats_mapping_registers(pt_id, port);
1003         }
1004         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1005                 fwd_streams[sm_id]->rx_packets = 0;
1006                 fwd_streams[sm_id]->tx_packets = 0;
1007                 fwd_streams[sm_id]->fwd_dropped = 0;
1008                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1009                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1010
1011 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1012                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1013                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1014                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1015                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1016 #endif
1017 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1018                 fwd_streams[sm_id]->core_cycles = 0;
1019 #endif
1020         }
1021         if (with_tx_first) {
1022                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1023                 if (port_fwd_begin != NULL) {
1024                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1025                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1026                 }
1027                 launch_packet_forwarding(run_one_txonly_burst_on_core);
1028                 rte_eal_mp_wait_lcore();
1029                 port_fwd_end = tx_only_engine.port_fwd_end;
1030                 if (port_fwd_end != NULL) {
1031                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1032                                 (*port_fwd_end)(fwd_ports_ids[i]);
1033                 }
1034         }
1035         launch_packet_forwarding(start_pkt_forward_on_core);
1036 }
1037
1038 void
1039 stop_packet_forwarding(void)
1040 {
1041         struct rte_eth_stats stats;
1042         struct rte_port *port;
1043         port_fwd_end_t  port_fwd_end;
1044         int i;
1045         portid_t   pt_id;
1046         streamid_t sm_id;
1047         lcoreid_t  lc_id;
1048         uint64_t total_recv;
1049         uint64_t total_xmit;
1050         uint64_t total_rx_dropped;
1051         uint64_t total_tx_dropped;
1052         uint64_t total_rx_nombuf;
1053         uint64_t tx_dropped;
1054         uint64_t rx_bad_ip_csum;
1055         uint64_t rx_bad_l4_csum;
1056 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1057         uint64_t fwd_cycles;
1058 #endif
1059         static const char *acc_stats_border = "+++++++++++++++";
1060
1061         if (all_ports_started() == 0) {
1062                 printf("Not all ports were started\n");
1063                 return;
1064         }
1065         if (test_done) {
1066                 printf("Packet forwarding not started\n");
1067                 return;
1068         }
1069         printf("Telling cores to stop...");
1070         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1071                 fwd_lcores[lc_id]->stopped = 1;
1072         printf("\nWaiting for lcores to finish...\n");
1073         rte_eal_mp_wait_lcore();
1074         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1075         if (port_fwd_end != NULL) {
1076                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1077                         pt_id = fwd_ports_ids[i];
1078                         (*port_fwd_end)(pt_id);
1079                 }
1080         }
1081 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1082         fwd_cycles = 0;
1083 #endif
1084         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1085                 if (cur_fwd_config.nb_fwd_streams >
1086                     cur_fwd_config.nb_fwd_ports) {
1087                         fwd_stream_stats_display(sm_id);
1088                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1089                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1090                 } else {
1091                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1092                                 fwd_streams[sm_id];
1093                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1094                                 fwd_streams[sm_id];
1095                 }
1096                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1097                 tx_dropped = (uint64_t) (tx_dropped +
1098                                          fwd_streams[sm_id]->fwd_dropped);
1099                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1100
1101                 rx_bad_ip_csum =
1102                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1103                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1104                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1105                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1106                                                         rx_bad_ip_csum;
1107
1108                 rx_bad_l4_csum =
1109                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1110                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1111                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1112                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1113                                                         rx_bad_l4_csum;
1114
1115 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1116                 fwd_cycles = (uint64_t) (fwd_cycles +
1117                                          fwd_streams[sm_id]->core_cycles);
1118 #endif
1119         }
1120         total_recv = 0;
1121         total_xmit = 0;
1122         total_rx_dropped = 0;
1123         total_tx_dropped = 0;
1124         total_rx_nombuf  = 0;
1125         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1126                 pt_id = fwd_ports_ids[i];
1127
1128                 port = &ports[pt_id];
1129                 rte_eth_stats_get(pt_id, &stats);
1130                 stats.ipackets -= port->stats.ipackets;
1131                 port->stats.ipackets = 0;
1132                 stats.opackets -= port->stats.opackets;
1133                 port->stats.opackets = 0;
1134                 stats.ibytes   -= port->stats.ibytes;
1135                 port->stats.ibytes = 0;
1136                 stats.obytes   -= port->stats.obytes;
1137                 port->stats.obytes = 0;
1138                 stats.imissed  -= port->stats.imissed;
1139                 port->stats.imissed = 0;
1140                 stats.oerrors  -= port->stats.oerrors;
1141                 port->stats.oerrors = 0;
1142                 stats.rx_nombuf -= port->stats.rx_nombuf;
1143                 port->stats.rx_nombuf = 0;
1144                 stats.fdirmatch -= port->stats.fdirmatch;
1145                 port->stats.fdirmatch = 0;
1146                 stats.fdirmiss -= port->stats.fdirmiss;
1147                 port->stats.fdirmiss = 0;
1148
1149                 total_recv += stats.ipackets;
1150                 total_xmit += stats.opackets;
1151                 total_rx_dropped += stats.imissed;
1152                 total_tx_dropped += port->tx_dropped;
1153                 total_rx_nombuf  += stats.rx_nombuf;
1154
1155                 fwd_port_stats_display(pt_id, &stats);
1156         }
1157         printf("\n  %s Accumulated forward statistics for all ports"
1158                "%s\n",
1159                acc_stats_border, acc_stats_border);
1160         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1161                "%-"PRIu64"\n"
1162                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1163                "%-"PRIu64"\n",
1164                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1165                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1166         if (total_rx_nombuf > 0)
1167                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1168         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1169                "%s\n",
1170                acc_stats_border, acc_stats_border);
1171 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1172         if (total_recv > 0)
1173                 printf("\n  CPU cycles/packet=%u (total cycles="
1174                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1175                        (unsigned int)(fwd_cycles / total_recv),
1176                        fwd_cycles, total_recv);
1177 #endif
1178         printf("\nDone.\n");
1179         test_done = 1;
1180 }
1181
1182 void
1183 dev_set_link_up(portid_t pid)
1184 {
1185         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1186                 printf("\nSet link up fail.\n");
1187 }
1188
1189 void
1190 dev_set_link_down(portid_t pid)
1191 {
1192         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1193                 printf("\nSet link down fail.\n");
1194 }
1195
1196 static int
1197 all_ports_started(void)
1198 {
1199         portid_t pi;
1200         struct rte_port *port;
1201
1202         FOREACH_PORT(pi, ports) {
1203                 port = &ports[pi];
1204                 /* Check if there is a port which is not started */
1205                 if (port->port_status != RTE_PORT_STARTED)
1206                         return 0;
1207         }
1208
1209         /* All ports are started */
1210         return 1;
1211 }
1212
1213 int
1214 all_ports_stopped(void)
1215 {
1216         portid_t pi;
1217         struct rte_port *port;
1218
1219         FOREACH_PORT(pi, ports) {
1220                 port = &ports[pi];
1221                 if (port->port_status != RTE_PORT_STOPPED)
1222                         return 0;
1223         }
1224
1225         return 1;
1226 }
1227
1228 int
1229 port_is_started(portid_t port_id)
1230 {
1231         if (port_id_is_invalid(port_id, ENABLED_WARN))
1232                 return 0;
1233
1234         if (ports[port_id].port_status != RTE_PORT_STARTED)
1235                 return 0;
1236
1237         return 1;
1238 }
1239
1240 static int
1241 port_is_closed(portid_t port_id)
1242 {
1243         if (port_id_is_invalid(port_id, ENABLED_WARN))
1244                 return 0;
1245
1246         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1247                 return 0;
1248
1249         return 1;
1250 }
1251
1252 int
1253 start_port(portid_t pid)
1254 {
1255         int diag, need_check_link_status = -1;
1256         portid_t pi;
1257         queueid_t qi;
1258         struct rte_port *port;
1259         struct ether_addr mac_addr;
1260
1261         if (test_done == 0) {
1262                 printf("Please stop forwarding first\n");
1263                 return -1;
1264         }
1265
1266         if (port_id_is_invalid(pid, ENABLED_WARN))
1267                 return 0;
1268
1269         if (init_fwd_streams() < 0) {
1270                 printf("Fail from init_fwd_streams()\n");
1271                 return -1;
1272         }
1273
1274         if(dcb_config)
1275                 dcb_test = 1;
1276         FOREACH_PORT(pi, ports) {
1277                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1278                         continue;
1279
1280                 need_check_link_status = 0;
1281                 port = &ports[pi];
1282                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1283                                                  RTE_PORT_HANDLING) == 0) {
1284                         printf("Port %d is now not stopped\n", pi);
1285                         continue;
1286                 }
1287
1288                 if (port->need_reconfig > 0) {
1289                         port->need_reconfig = 0;
1290
1291                         printf("Configuring Port %d (socket %u)\n", pi,
1292                                         port->socket_id);
1293                         /* configure port */
1294                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1295                                                 &(port->dev_conf));
1296                         if (diag != 0) {
1297                                 if (rte_atomic16_cmpset(&(port->port_status),
1298                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1299                                         printf("Port %d can not be set back "
1300                                                         "to stopped\n", pi);
1301                                 printf("Fail to configure port %d\n", pi);
1302                                 /* try to reconfigure port next time */
1303                                 port->need_reconfig = 1;
1304                                 return -1;
1305                         }
1306                 }
1307                 if (port->need_reconfig_queues > 0) {
1308                         port->need_reconfig_queues = 0;
1309                         /* setup tx queues */
1310                         for (qi = 0; qi < nb_txq; qi++) {
1311                                 if ((numa_support) &&
1312                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1313                                         diag = rte_eth_tx_queue_setup(pi, qi,
1314                                                 nb_txd,txring_numa[pi],
1315                                                 &(port->tx_conf));
1316                                 else
1317                                         diag = rte_eth_tx_queue_setup(pi, qi,
1318                                                 nb_txd,port->socket_id,
1319                                                 &(port->tx_conf));
1320
1321                                 if (diag == 0)
1322                                         continue;
1323
1324                                 /* Fail to setup tx queue, return */
1325                                 if (rte_atomic16_cmpset(&(port->port_status),
1326                                                         RTE_PORT_HANDLING,
1327                                                         RTE_PORT_STOPPED) == 0)
1328                                         printf("Port %d can not be set back "
1329                                                         "to stopped\n", pi);
1330                                 printf("Fail to configure port %d tx queues\n", pi);
1331                                 /* try to reconfigure queues next time */
1332                                 port->need_reconfig_queues = 1;
1333                                 return -1;
1334                         }
1335                         /* setup rx queues */
1336                         for (qi = 0; qi < nb_rxq; qi++) {
1337                                 if ((numa_support) &&
1338                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1339                                         struct rte_mempool * mp =
1340                                                 mbuf_pool_find(rxring_numa[pi]);
1341                                         if (mp == NULL) {
1342                                                 printf("Failed to setup RX queue: "
1343                                                         "No mempool allocation "
1344                                                         "on socket %d\n",
1345                                                         rxring_numa[pi]);
1346                                                 return -1;
1347                                         }
1348
1349                                         diag = rte_eth_rx_queue_setup(pi, qi,
1350                                              nb_rxd,rxring_numa[pi],
1351                                              &(port->rx_conf),mp);
1352                                 }
1353                                 else
1354                                         diag = rte_eth_rx_queue_setup(pi, qi,
1355                                              nb_rxd,port->socket_id,
1356                                              &(port->rx_conf),
1357                                              mbuf_pool_find(port->socket_id));
1358
1359                                 if (diag == 0)
1360                                         continue;
1361
1362
1363                                 /* Fail to setup rx queue, return */
1364                                 if (rte_atomic16_cmpset(&(port->port_status),
1365                                                         RTE_PORT_HANDLING,
1366                                                         RTE_PORT_STOPPED) == 0)
1367                                         printf("Port %d can not be set back "
1368                                                         "to stopped\n", pi);
1369                                 printf("Fail to configure port %d rx queues\n", pi);
1370                                 /* try to reconfigure queues next time */
1371                                 port->need_reconfig_queues = 1;
1372                                 return -1;
1373                         }
1374                 }
1375                 /* start port */
1376                 if (rte_eth_dev_start(pi) < 0) {
1377                         printf("Fail to start port %d\n", pi);
1378
1379                         /* Fail to setup rx queue, return */
1380                         if (rte_atomic16_cmpset(&(port->port_status),
1381                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1382                                 printf("Port %d can not be set back to "
1383                                                         "stopped\n", pi);
1384                         continue;
1385                 }
1386
1387                 if (rte_atomic16_cmpset(&(port->port_status),
1388                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1389                         printf("Port %d can not be set into started\n", pi);
1390
1391                 rte_eth_macaddr_get(pi, &mac_addr);
1392                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1393                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1394                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1395                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1396
1397                 /* at least one port started, need to check link status */
1398                 need_check_link_status = 1;
1399         }
1400
1401         if (need_check_link_status == 1 && !no_link_check)
1402                 check_all_ports_link_status(RTE_PORT_ALL);
1403         else if (need_check_link_status == 0)
1404                 printf("Please stop the ports first\n");
1405
1406         printf("Done\n");
1407         return 0;
1408 }
1409
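     /*
      * Stop the given port, or every port when pid == RTE_PORT_ALL.
      * Requires packet forwarding to be stopped first; each port goes
      * from STARTED to STOPPED through the transient HANDLING state.
      */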
1410 void
1411 stop_port(portid_t pid)
1412 {
1413         portid_t pi;
1414         struct rte_port *port;
1415         int need_check_link_status = 0;
1416
1417         if (test_done == 0) {
1418                 printf("Please stop forwarding first\n");
1419                 return;
1420         }
1421         if (dcb_test) {
1422                 dcb_test = 0;
1423                 dcb_config = 0;
1424         }
1425
1426         if (port_id_is_invalid(pid, ENABLED_WARN))
1427                 return;
1428
1429         printf("Stopping ports...\n");
1430
1431         FOREACH_PORT(pi, ports) {
1432                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1433                         continue;
1434
1435                 port = &ports[pi];
1436                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1437                                                 RTE_PORT_HANDLING) == 0)
1438                         continue;
1439
1440                 rte_eth_dev_stop(pi);
1441
1442                 if (rte_atomic16_cmpset(&(port->port_status),
1443                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1444                         printf("Port %d cannot be set to stopped\n", pi);
1445                 need_check_link_status = 1;
1446         }
1447         if (need_check_link_status && !no_link_check)
1448                 check_all_ports_link_status(RTE_PORT_ALL);
1449
1450         printf("Done\n");
1451 }
1452
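     /*
      * Close the given port, or every port when pid == RTE_PORT_ALL.
      * Only ports currently in the STOPPED state are closed; they end
      * up in the CLOSED state after rte_eth_dev_close().
      */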
1453 void
1454 close_port(portid_t pid)
1455 {
1456         portid_t pi;
1457         struct rte_port *port;
1458
1459         if (test_done == 0) {
1460                 printf("Please stop forwarding first\n");
1461                 return;
1462         }
1463
1464         if (port_id_is_invalid(pid, ENABLED_WARN))
1465                 return;
1466
1467         printf("Closing ports...\n");
1468
1469         FOREACH_PORT(pi, ports) {
1470                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1471                         continue;
1472
1473                 port = &ports[pi];
1474                 if (rte_atomic16_cmpset(&(port->port_status),
1475                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1476                         printf("Port %d is not stopped, skipping close\n", pi);
1477                         continue;
1478                 }
1479
1480                 rte_eth_dev_close(pi);
1481
1482                 if (rte_atomic16_cmpset(&(port->port_status),
1483                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1484                         printf("Port %d cannot be set to closed\n", pi);
1485         }
1486
1487         printf("Done\n");
1488 }
1489
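     /*
      * Hot-plug a new port from a device identifier (e.g. a PCI address
      * or a virtual device name), reconfigure it on its NUMA socket,
      * enable promiscuous mode and rebuild the forwarding port list.
      */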
1490 void
1491 attach_port(char *identifier)
1492 {
1493         portid_t i, j, pi = 0;
1494
1495         printf("Attaching a new port...\n");
1496
1497         if (identifier == NULL) {
1498                 printf("Invalid parameter specified\n");
1499                 return;
1500         }
1501
1502         if (test_done == 0) {
1503                 printf("Please stop forwarding first\n");
1504                 return;
1505         }
1506
1507         if (rte_eth_dev_attach(identifier, &pi))
1508                 return;
1509
1510         ports[pi].enabled = 1;
1511         reconfig(pi, rte_eth_dev_socket_id(pi));
1512         rte_eth_promiscuous_enable(pi);
1513
1514         nb_ports = rte_eth_dev_count();
1515
1516         /* set_default_fwd_ports_config(); */
1517         bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
1518         i = 0;
1519         FOREACH_PORT(j, ports) {
1520                 fwd_ports_ids[i] = j;
1521                 i++;
1522         }
1523         nb_cfg_ports = nb_ports;
1524         nb_fwd_ports++;
1525
1526         ports[pi].port_status = RTE_PORT_STOPPED;
1527
1528         printf("Port %d is attached. Total ports now: %d\n", pi, nb_ports);
1529         printf("Done\n");
1530 }
1531
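     /*
      * Hot-unplug an already closed port: disable promiscuous mode,
      * detach the underlying device and rebuild the forwarding port list.
      */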
1532 void
1533 detach_port(uint8_t port_id)
1534 {
1535         portid_t i, pi = 0;
1536         char name[RTE_ETH_NAME_MAX_LEN];
1537
1538         printf("Detaching a port...\n");
1539
1540         if (!port_is_closed(port_id)) {
1541                 printf("Please close port first\n");
1542                 return;
1543         }
1544
1545         rte_eth_promiscuous_disable(port_id);
1546
1547         if (rte_eth_dev_detach(port_id, name))
1548                 return;
1549
1550         ports[port_id].enabled = 0;
1551         nb_ports = rte_eth_dev_count();
1552
1553         /* set_default_fwd_ports_config(); */
1554         bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
1555         i = 0;
1556         FOREACH_PORT(pi, ports) {
1557                 fwd_ports_ids[i] = pi;
1558                 i++;
1559         }
1560         nb_cfg_ports = nb_ports;
1561         nb_fwd_ports--;
1562
1563         printf("Port '%s' is detached. Total ports now: %d\n",
1564                         name, nb_ports);
1565         printf("Done\n");
1566         return;
1567 }
1568
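     /* Stop packet forwarding if it is still running, then close all ports. */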
1569 void
1570 pmd_test_exit(void)
1571 {
1572         portid_t pt_id;
1573
1574         if (test_done == 0)
1575                 stop_packet_forwarding();
1576
1577         FOREACH_PORT(pt_id, ports) {
1578                 printf("Stopping port %d...", pt_id);
1579                 fflush(stdout);
1580                 rte_eth_dev_close(pt_id);
1581                 printf("done\n");
1582         }
1583         printf("bye...\n");
1584 }
1585
1586 typedef void (*cmd_func_t)(void);
1587 struct pmd_test_command {
1588         const char *cmd_name;
1589         cmd_func_t cmd_func;
1590 };
1591
1592 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1593
1594 /* Check the link status of all ports for up to 9s, then print the final result */
1595 static void
1596 check_all_ports_link_status(uint32_t port_mask)
1597 {
1598 #define CHECK_INTERVAL 100 /* 100ms */
1599 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1600         uint8_t portid, count, all_ports_up, print_flag = 0;
1601         struct rte_eth_link link;
1602
1603         printf("Checking link statuses...\n");
1604         fflush(stdout);
1605         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1606                 all_ports_up = 1;
1607                 FOREACH_PORT(portid, ports) {
1608                         if ((port_mask & (1 << portid)) == 0)
1609                                 continue;
1610                         memset(&link, 0, sizeof(link));
1611                         rte_eth_link_get_nowait(portid, &link);
1612                         /* print link status if flag set */
1613                         if (print_flag == 1) {
1614                                 if (link.link_status)
1615                                         printf("Port %d Link Up - speed %u "
1616                                                 "Mbps - %s\n", (uint8_t)portid,
1617                                                 (unsigned)link.link_speed,
1618                                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1619                                                 ("full-duplex") : ("half-duplex"));
1620                                 else
1621                                         printf("Port %d Link Down\n",
1622                                                 (uint8_t)portid);
1623                                 continue;
1624                         }
1625                         /* clear all_ports_up flag if any link down */
1626                         if (link.link_status == 0) {
1627                                 all_ports_up = 0;
1628                                 break;
1629                         }
1630                 }
1631                 /* after finally printing all link status, get out */
1632                 if (print_flag == 1)
1633                         break;
1634
1635                 if (all_ports_up == 0) {
1636                         fflush(stdout);
1637                         rte_delay_ms(CHECK_INTERVAL);
1638                 }
1639
1640                 /* set the print_flag if all ports up or timeout */
1641                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1642                         print_flag = 1;
1643                 }
1644         }
1645 }
1646
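     /*
      * Apply the TX queue to stats-counter mappings given on the command
      * line to this port. Returns 0 on success, or the error code from
      * rte_eth_dev_set_tx_queue_stats_mapping().
      */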
1647 static int
1648 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1649 {
1650         uint16_t i;
1651         int diag;
1652         uint8_t mapping_found = 0;
1653
1654         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1655                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1656                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1657                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1658                                         tx_queue_stats_mappings[i].queue_id,
1659                                         tx_queue_stats_mappings[i].stats_counter_id);
1660                         if (diag != 0)
1661                                 return diag;
1662                         mapping_found = 1;
1663                 }
1664         }
1665         if (mapping_found)
1666                 port->tx_queue_stats_mapping_enabled = 1;
1667         return 0;
1668 }
1669
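     /*
      * Apply the RX queue to stats-counter mappings given on the command
      * line to this port. Returns 0 on success, or the error code from
      * rte_eth_dev_set_rx_queue_stats_mapping().
      */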
1670 static int
1671 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1672 {
1673         uint16_t i;
1674         int diag;
1675         uint8_t mapping_found = 0;
1676
1677         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1678                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1679                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1680                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1681                                         rx_queue_stats_mappings[i].queue_id,
1682                                         rx_queue_stats_mappings[i].stats_counter_id);
1683                         if (diag != 0)
1684                                 return diag;
1685                         mapping_found = 1;
1686                 }
1687         }
1688         if (mapping_found)
1689                 port->rx_queue_stats_mapping_enabled = 1;
1690         return 0;
1691 }
1692
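     /*
      * Program both TX and RX queue statistics mappings of a port.
      * -ENOTSUP only disables the mapping; any other error is fatal.
      */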
1693 static void
1694 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1695 {
1696         int diag = 0;
1697
1698         diag = set_tx_queue_stats_mapping_registers(pi, port);
1699         if (diag != 0) {
1700                 if (diag == -ENOTSUP) {
1701                         port->tx_queue_stats_mapping_enabled = 0;
1702                         printf("TX queue stats mapping not supported for port id=%d\n", pi);
1703                 }
1704                 else
1705                         rte_exit(EXIT_FAILURE,
1706                                         "set_tx_queue_stats_mapping_registers "
1707                                         "failed for port id=%d diag=%d\n",
1708                                         pi, diag);
1709         }
1710
1711         diag = set_rx_queue_stats_mapping_registers(pi, port);
1712         if (diag != 0) {
1713                 if (diag == -ENOTSUP) {
1714                         port->rx_queue_stats_mapping_enabled = 0;
1715                         printf("RX queue stats mapping not supported for port id=%d\n", pi);
1716                 }
1717                 else
1718                         rte_exit(EXIT_FAILURE,
1719                                         "set_rx_queue_stats_mapping_registers "
1720                                         "failed for port id=%d diag=%d\n",
1721                                         pi, diag);
1722         }
1723 }
1724
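     /*
      * Start from the driver default RX/TX queue configuration and
      * override only the thresholds and flags set on the command line.
      */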
1725 static void
1726 rxtx_port_config(struct rte_port *port)
1727 {
1728         port->rx_conf = port->dev_info.default_rxconf;
1729         port->tx_conf = port->dev_info.default_txconf;
1730
1731         /* Check if any RX/TX parameters have been passed */
1732         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1733                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1734
1735         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1736                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1737
1738         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1739                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1740
1741         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1742                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1743
1744         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1745                 port->rx_conf.rx_drop_en = rx_drop_en;
1746
1747         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1748                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1749
1750         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1751                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1752
1753         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1754                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1755
1756         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1757                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1758
1759         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1760                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1761
1762         if (txq_flags != RTE_PMD_PARAM_UNSET)
1763                 port->tx_conf.txq_flags = txq_flags;
1764 }
1765
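     /*
      * Build the default configuration of every port: RX mode, flow
      * director, RSS (when more than one RX queue is used), multi-queue
      * mode, and the per-queue RX/TX parameters.
      */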
1766 void
1767 init_port_config(void)
1768 {
1769         portid_t pid;
1770         struct rte_port *port;
1771
1772         FOREACH_PORT(pid, ports) {
1773                 port = &ports[pid];
1774                 port->dev_conf.rxmode = rx_mode;
1775                 port->dev_conf.fdir_conf = fdir_conf;
1776                 if (nb_rxq > 1) {
1777                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1778                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1779                 } else {
1780                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1781                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1782                 }
1783
1784                 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1785                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1786                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1787                         else
1788                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1789                 }
1790
1791                 if (port->dev_info.max_vfs != 0) {
1792                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1793                                 port->dev_conf.rxmode.mq_mode =
1794                                         ETH_MQ_RX_VMDQ_RSS;
1795                         else
1796                                 port->dev_conf.rxmode.mq_mode =
1797                                         ETH_MQ_RX_NONE;
1798
1799                         port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1800                 }
1801
1802                 rxtx_port_config(port);
1803
1804                 rte_eth_macaddr_get(pid, &port->eth_addr);
1805
1806                 map_port_queue_stats_mapping_registers(pid, port);
1807 #ifdef RTE_NIC_BYPASS
1808                 rte_eth_dev_bypass_init(pid);
1809 #endif
1810         }
1811 }
1812
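     /* VLAN tags used by the DCB/VMDQ test configuration below. */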
1813 const uint16_t vlan_tags[] = {
1814                 0,  1,  2,  3,  4,  5,  6,  7,
1815                 8,  9, 10, 11,  12, 13, 14, 15,
1816                 16, 17, 18, 19, 20, 21, 22, 23,
1817                 24, 25, 26, 27, 28, 29, 30, 31
1818 };
1819
1820 static  int
1821 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1822 {
1823         uint8_t i;
1824
1825         /*
1826          * Builds up the correct configuration for dcb+vt based on the vlan tags array
1827          * given above, and the number of traffic classes available for use.
1828          */
1829         if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1830                 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1831                 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1832
1833                 /* VMDQ+DCB RX and TX configurations */
1834                 vmdq_rx_conf.enable_default_pool = 0;
1835                 vmdq_rx_conf.default_pool = 0;
1836                 vmdq_rx_conf.nb_queue_pools =
1837                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1838                 vmdq_tx_conf.nb_queue_pools =
1839                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1840
1841                 vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1842                 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1843                         vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1844                         vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1845                 }
1846                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1847                         vmdq_rx_conf.dcb_queue[i] = i;
1848                         vmdq_tx_conf.dcb_queue[i] = i;
1849                 }
1850
1851                 /* set DCB mode of RX and TX of multiple queues */
1852                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1853                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1854                 if (dcb_conf->pfc_en)
1855                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1856                 else
1857                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1858
1859                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1860                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1861                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1862                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1863         }
1864         else {
1865                 struct rte_eth_dcb_rx_conf rx_conf;
1866                 struct rte_eth_dcb_tx_conf tx_conf;
1867
1868                 /* queue mapping configuration of DCB RX and TX */
1869                 if (dcb_conf->num_tcs == ETH_4_TCS)
1870                         dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1871                 else
1872                         dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1873
1874                 rx_conf.nb_tcs = dcb_conf->num_tcs;
1875                 tx_conf.nb_tcs = dcb_conf->num_tcs;
1876
1877                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1878                         rx_conf.dcb_queue[i] = i;
1879                         tx_conf.dcb_queue[i] = i;
1880                 }
1881                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1882                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1883                 if (dcb_conf->pfc_en)
1884                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1885                 else
1886                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1887
1888                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1889                                 sizeof(struct rte_eth_dcb_rx_conf)));
1890                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1891                                 sizeof(struct rte_eth_dcb_tx_conf)));
1892         }
1893
1894         return 0;
1895 }
1896
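     /*
      * Switch a port to DCB mode: 128 RX/TX queues, DCB (optionally
      * combined with VMDQ) configuration and VLAN filtering enabled for
      * all test VLAN tags.
      */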
1897 int
1898 init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
1899 {
1900         struct rte_eth_conf port_conf;
1901         struct rte_port *rte_port;
1902         int retval;
1903         uint16_t nb_vlan;
1904         uint16_t i;
1905
1906         /* rxq and txq configuration in dcb mode */
1907         nb_rxq = 128;
1908         nb_txq = 128;
1909         rx_free_thresh = 64;
1910
1911         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1912         /* Enter DCB configuration status */
1913         dcb_config = 1;
1914
1915         nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1916         /* set configuration of DCB in VT mode and DCB in non-VT mode */
1917         retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1918         if (retval < 0)
1919                 return retval;
1920
1921         rte_port = &ports[pid];
1922         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1923
1924         rxtx_port_config(rte_port);
1925         /* VLAN filter */
1926         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1927         for (i = 0; i < nb_vlan; i++){
1928                 rx_vft_set(pid, vlan_tags[i], 1);
1929         }
1930
1931         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1932         map_port_queue_stats_mapping_registers(pid, rte_port);
1933
1934         rte_port->dcb_flag = 1;
1935
1936         return 0;
1937 }
1938
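     /* Allocate the port array and mark all probed ports as enabled. */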
1939 static void
1940 init_port(void)
1941 {
1942         portid_t pid;
1943
1944         /* Configuration of Ethernet ports. */
1945         ports = rte_zmalloc("testpmd: ports",
1946                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
1947                             RTE_CACHE_LINE_SIZE);
1948         if (ports == NULL) {
1949                 rte_exit(EXIT_FAILURE,
1950                                 "rte_zmalloc(%d struct rte_port) failed\n",
1951                                 RTE_MAX_ETHPORTS);
1952         }
1953
1954         /* enable allocated ports */
1955         for (pid = 0; pid < nb_ports; pid++)
1956                 ports[pid].enabled = 1;
1957 }
1958
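     /*
      * Entry point: initialize the EAL, probe the Ethernet devices, parse
      * the application arguments, configure and start all ports, then run
      * either the interactive command line or automatic packet forwarding.
      */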
1959 int
1960 main(int argc, char** argv)
1961 {
1962         int  diag;
1963         uint8_t port_id;
1964
1965         diag = rte_eal_init(argc, argv);
1966         if (diag < 0)
1967                 rte_panic("Cannot init EAL\n");
1968
1969         nb_ports = (portid_t) rte_eth_dev_count();
1970         if (nb_ports == 0)
1971                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
1972
1973         /* allocate port structures, and init them */
1974         init_port();
1975
1976         set_def_fwd_config();
1977         if (nb_lcores == 0)
1978                 rte_panic("Empty set of forwarding logical cores - check the "
1979                           "core mask supplied in the command parameters\n");
1980
1981         argc -= diag;
1982         argv += diag;
1983         if (argc > 1)
1984                 launch_args_parse(argc, argv);
1985
1986         if (nb_rxq > nb_txq)
1987                 printf("Warning: nb_rxq=%d enables RSS configuration, "
1988                        "but nb_txq=%d will prevent it from being fully tested.\n",
1989                        nb_rxq, nb_txq);
1990
1991         init_config();
1992         if (start_port(RTE_PORT_ALL) != 0)
1993                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
1994
1995         /* set all ports to promiscuous mode by default */
1996         FOREACH_PORT(port_id, ports)
1997                 rte_eth_promiscuous_enable(port_id);
1998
1999 #ifdef RTE_LIBRTE_CMDLINE
2000         if (interactive == 1) {
2001                 if (auto_start) {
2002                         printf("Start automatic packet forwarding\n");
2003                         start_packet_forwarding(0);
2004                 }
2005                 prompt();
2006         } else
2007 #endif
2008         {
2009                 char c;
2010                 int rc;
2011
2012                 printf("No command-line core given, starting packet forwarding\n");
2013                 start_packet_forwarding(0);
2014                 printf("Press enter to exit\n");
2015                 rc = read(0, &c, 1);
2016                 if (rc < 0)
2017                         return 1;
2018         }
2019
2020         return 0;
2021 }