app/testpmd: detect numa socket count
[dpdk.git] app/test-pmd/testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_eal.h>
61 #include <rte_per_lcore.h>
62 #include <rte_lcore.h>
63 #include <rte_atomic.h>
64 #include <rte_branch_prediction.h>
65 #include <rte_ring.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
68 #include <rte_mbuf.h>
69 #include <rte_interrupts.h>
70 #include <rte_pci.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
73 #include <rte_dev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81
82 uint16_t verbose_level = 0; /**< Silent by default. */
83
84 /* use master core for command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
87
88 /*
89  * NUMA support configuration.
90  * When set, NUMA support attempts to dispatch the allocation of the
91  * RX and TX memory rings, and of the DMA memory buffers (mbufs) of the
92  * probed ports, among the detected CPU sockets.
93  * Otherwise, all memory is allocated from CPU socket 0.
94  */
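/*
 * NUMA support is typically enabled with the --numa command-line option;
 * the placement can be refined with the --port-numa-config and
 * --ring-numa-config options referenced below in check_socket_id().
 */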
95 uint8_t numa_support = 0; /**< No numa support by default */
96
97 /*
98  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99  * not configured.
100  */
101 uint8_t socket_num = UMA_NO_CONFIG;
102
103 /*
104  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
105  */
106 uint8_t mp_anon = 0;
107
108 /*
109  * Record the Ethernet address of peer target ports to which packets are
110  * forwarded.
111  * Must be instantiated with the Ethernet addresses of peer traffic generator
112  * ports.
113  */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
116
117 /*
118  * Probed Target Environment.
119  */
120 struct rte_port *ports;        /**< For all probed ethernet ports. */
121 portid_t nb_ports;             /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124
125 /*
126  * Test Forwarding Configuration.
127  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129  */
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
134
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
140
141 /*
142  * Forwarding engines.
143  */
144 struct fwd_engine * fwd_engines[] = {
145         &io_fwd_engine,
146         &mac_fwd_engine,
147         &mac_retry_fwd_engine,
148         &mac_swap_engine,
149         &flow_gen_engine,
150         &rx_only_engine,
151         &tx_only_engine,
152         &csum_fwd_engine,
153         &icmp_echo_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155         &ieee1588_fwd_engine,
156 #endif
157         NULL,
158 };
159
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
162
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
165                                       * specified on command-line. */
166
167 /*
168  * Configuration of packet segments used by the "txonly" processing engine.
169  */
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172         TXONLY_DEF_PACKET_LEN,
173 };
174 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
175
176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
177 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
178
179 /* Whether the current configuration is in DCB mode; 0 means it is not. */
180 uint8_t dcb_config = 0;
181
182 /* Whether DCB is being tested */
183 uint8_t dcb_test = 0;
184
185 /* DCB on and VT on mapping is default */
186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
187
188 /*
189  * Configurable number of RX/TX queues.
190  */
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193
194 /*
195  * Configurable number of RX/TX ring descriptors.
196  */
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
201
202 #define RTE_PMD_PARAM_UNSET -1
203 /*
204  * Configurable values of RX and TX ring threshold registers.
205  */
206
207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
210
211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
214
215 /*
216  * Configurable value of RX free threshold.
217  */
218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
219
220 /*
221  * Configurable value of RX drop enable.
222  */
223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
224
225 /*
226  * Configurable value of TX free threshold.
227  */
228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
229
230 /*
231  * Configurable value of TX RS bit threshold.
232  */
233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
234
235 /*
236  * Configurable value of TX queue flags.
237  */
238 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
239
240 /*
241  * Receive Side Scaling (RSS) configuration.
242  */
243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
244
245 /*
246  * Port topology configuration
247  */
248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
249
250 /*
251  * Avoid flushing all the RX streams before starting forwarding.
252  */
253 uint8_t no_flush_rx = 0; /* flush by default */
254
255 /*
256  * Avoid checking the link status when starting/stopping a port.
257  */
258 uint8_t no_link_check = 0; /* check by default */
259
260 /*
261  * NIC bypass mode configuration options.
262  */
263 #ifdef RTE_NIC_BYPASS
264
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
267
268 #endif
269
270 /*
271  * Ethernet device configuration.
272  */
273 struct rte_eth_rxmode rx_mode = {
274         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
275         .split_hdr_size = 0,
276         .header_split   = 0, /**< Header Split disabled. */
277         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
280         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
282         .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
283 };
284
285 struct rte_fdir_conf fdir_conf = {
286         .mode = RTE_FDIR_MODE_NONE,
287         .pballoc = RTE_FDIR_PBALLOC_64K,
288         .status = RTE_FDIR_REPORT_STATUS,
289         .mask = {
290                 .vlan_tci_mask = 0x0,
291                 .ipv4_mask     = {
292                         .src_ip = 0xFFFFFFFF,
293                         .dst_ip = 0xFFFFFFFF,
294                 },
295                 .ipv6_mask     = {
296                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
297                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
298                 },
299                 .src_port_mask = 0xFFFF,
300                 .dst_port_mask = 0xFFFF,
301         },
302         .drop_queue = 127,
303 };
304
305 volatile int test_done = 1; /* packet forwarding is stopped when set to 1. */
306
307 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
308 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
309
310 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
311 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
312
313 uint16_t nb_tx_queue_stats_mappings = 0;
314 uint16_t nb_rx_queue_stats_mappings = 0;
315
316 unsigned max_socket = 0;
317
318 /* Forward function declarations */
319 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
320 static void check_all_ports_link_status(uint32_t port_mask);
321
322 /*
323  * Check if all the ports are started.
324  * If yes, return a positive value. If not, return zero.
325  */
326 static int all_ports_started(void);
327
328 /*
329  * Find next enabled port
330  */
331 portid_t
332 find_next_port(portid_t p, struct rte_port *ports, int size)
333 {
334         if (ports == NULL)
335                 rte_exit(-EINVAL, "failed to find a next port id\n");
336
337         while ((p < size) && (ports[p].enabled == 0))
338                 p++;
339         return p;
340 }
341
342 /*
343  * Setup default configuration.
344  */
345 static void
346 set_default_fwd_lcores_config(void)
347 {
348         unsigned int i;
349         unsigned int nb_lc;
350         unsigned int sock_num;
351
352         nb_lc = 0;
353         for (i = 0; i < RTE_MAX_LCORE; i++) {
354                 if (! rte_lcore_is_enabled(i))
355                         continue;
356                 if (i == rte_get_master_lcore())
357                         continue;
358                 fwd_lcores_cpuids[nb_lc++] = i;
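                /*
                 * Detect the number of CPU sockets in use: max_socket is kept
                 * as the highest socket id (+1) seen among the enabled lcores.
                 */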
359                 sock_num = rte_lcore_to_socket_id(i) + 1;
360                 if (sock_num > max_socket) {
361                         if (sock_num > RTE_MAX_NUMA_NODES)
362                                 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
363                         max_socket = sock_num;
364                 }
365         }
366         nb_lcores = (lcoreid_t) nb_lc;
367         nb_cfg_lcores = nb_lcores;
368         nb_fwd_lcores = 1;
369 }
370
371 static void
372 set_def_peer_eth_addrs(void)
373 {
374         portid_t i;
375
376         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
377                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
378                 peer_eth_addrs[i].addr_bytes[5] = i;
379         }
380 }
381
382 static void
383 set_default_fwd_ports_config(void)
384 {
385         portid_t pt_id;
386
387         for (pt_id = 0; pt_id < nb_ports; pt_id++)
388                 fwd_ports_ids[pt_id] = pt_id;
389
390         nb_cfg_ports = nb_ports;
391         nb_fwd_ports = nb_ports;
392 }
393
394 void
395 set_def_fwd_config(void)
396 {
397         set_default_fwd_lcores_config();
398         set_def_peer_eth_addrs();
399         set_default_fwd_ports_config();
400 }
401
402 /*
403  * Create an mbuf pool on the given socket; done once at init time.
404  */
405 static void
406 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
407                  unsigned int socket_id)
408 {
409         char pool_name[RTE_MEMPOOL_NAMESIZE];
410         struct rte_mempool *rte_mp;
411         uint32_t mb_size;
412
413         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
414         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
415
416 #ifdef RTE_LIBRTE_PMD_XENVIRT
417         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
418                 (unsigned) mb_mempool_cache,
419                 sizeof(struct rte_pktmbuf_pool_private),
420                 rte_pktmbuf_pool_init, NULL,
421                 rte_pktmbuf_init, NULL,
422                 socket_id, 0);
423
424
425
426 #else
427         if (mp_anon != 0)
428                 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
429                                     (unsigned) mb_mempool_cache,
430                                     sizeof(struct rte_pktmbuf_pool_private),
431                                     rte_pktmbuf_pool_init, NULL,
432                                     rte_pktmbuf_init, NULL,
433                                     socket_id, 0);
434         else
435                 /* wrapper to rte_mempool_create() */
436                 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
437                         mb_mempool_cache, 0, mbuf_seg_size, socket_id);
438
439 #endif
440
441         if (rte_mp == NULL) {
442                 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
443                                                 "failed\n", socket_id);
444         } else if (verbose_level > 0) {
445                 rte_mempool_dump(stdout, rte_mp);
446         }
447 }
448
449 /*
450  * Check whether the given socket id is valid in NUMA mode;
451  * if valid, return 0, else return -1.
452  */
453 static int
454 check_socket_id(const unsigned int socket_id)
455 {
456         static int warning_once = 0;
457
458         if (socket_id >= max_socket) {
459                 if (!warning_once && numa_support)
460                         printf("Warning: NUMA should be configured manually by"
461                                " using --port-numa-config and"
462                                " --ring-numa-config parameters along with"
463                                " --numa.\n");
464                 warning_once = 1;
465                 return -1;
466         }
467         return 0;
468 }
469
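/*
 * Global configuration initialisation, done once at init time: allocate the
 * forwarding lcore contexts, create the mbuf pool(s) (one per detected socket
 * when NUMA support is enabled), and set up the default port configuration
 * and the forwarding streams.
 */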
470 static void
471 init_config(void)
472 {
473         portid_t pid;
474         struct rte_port *port;
475         struct rte_mempool *mbp;
476         unsigned int nb_mbuf_per_pool;
477         lcoreid_t  lc_id;
478         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
479
480         memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
481         /* Configuration of logical cores. */
482         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
483                                 sizeof(struct fwd_lcore *) * nb_lcores,
484                                 RTE_CACHE_LINE_SIZE);
485         if (fwd_lcores == NULL) {
486                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
487                                                         "failed\n", nb_lcores);
488         }
489         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
490                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
491                                                sizeof(struct fwd_lcore),
492                                                RTE_CACHE_LINE_SIZE);
493                 if (fwd_lcores[lc_id] == NULL) {
494                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
495                                                                 "failed\n");
496                 }
497                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
498         }
499
500         /*
501          * Create mbuf pools.
502          * If NUMA support is disabled, create a single mbuf pool in
503          * socket 0 memory by default.
504          * Otherwise, create an mbuf pool in the memory of each detected socket.
505          *
506          * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd and
507          * nb_txd can be reconfigured at run time.
508          */
509         if (param_total_num_mbufs)
510                 nb_mbuf_per_pool = param_total_num_mbufs;
511         else {
512                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
513                                 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
514
515                 if (!numa_support)
516                         nb_mbuf_per_pool =
517                                 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
518         }
519
520         if (!numa_support) {
521                 if (socket_num == UMA_NO_CONFIG)
522                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
523                 else
524                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
525                                                  socket_num);
526         }
527
528         FOREACH_PORT(pid, ports) {
529                 port = &ports[pid];
530                 rte_eth_dev_info_get(pid, &port->dev_info);
531
532                 if (numa_support) {
533                         if (port_numa[pid] != NUMA_NO_CONFIG)
534                                 port_per_socket[port_numa[pid]]++;
535                         else {
536                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
537
538                                 /* if socket_id is invalid, set to 0 */
539                                 if (check_socket_id(socket_id) < 0)
540                                         socket_id = 0;
541                                 port_per_socket[socket_id]++;
542                         }
543                 }
544
545                 /* set flag to initialize port/queue */
546                 port->need_reconfig = 1;
547                 port->need_reconfig_queues = 1;
548         }
549
550         if (numa_support) {
551                 uint8_t i;
552                 unsigned int nb_mbuf;
553
554                 if (param_total_num_mbufs)
555                         nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
556
557                 for (i = 0; i < max_socket; i++) {
558                         nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
559                         if (nb_mbuf)
560                                 mbuf_pool_create(mbuf_data_size,
561                                                 nb_mbuf,i);
562                 }
563         }
564         init_port_config();
565
566         /*
567          * Record which mbuf pool each logical core should use, if needed.
568          */
569         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
570                 mbp = mbuf_pool_find(
571                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
572
573                 if (mbp == NULL)
574                         mbp = mbuf_pool_find(0);
575                 fwd_lcores[lc_id]->mbp = mbp;
576         }
577
578         /* Configuration of packet forwarding streams. */
579         if (init_fwd_streams() < 0)
580                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
581 }
582
583
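/*
 * Re-read the device info of a single port, record its socket id, flag it
 * for port/queue reconfiguration and refresh the port configuration.
 */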
584 void
585 reconfig(portid_t new_port_id, unsigned socket_id)
586 {
587         struct rte_port *port;
588
589         /* Reconfiguration of Ethernet ports. */
590         port = &ports[new_port_id];
591         rte_eth_dev_info_get(new_port_id, &port->dev_info);
592
593         /* set flag to initialize port/queue */
594         port->need_reconfig = 1;
595         port->need_reconfig_queues = 1;
596         port->socket_id = socket_id;
597
598         init_port_config();
599 }
600
601
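/*
 * Check the per-port RX/TX queue limits, assign a socket id to each port
 * (according to the NUMA configuration) and (re)allocate one forwarding
 * stream per RX queue of each port.
 */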
602 int
603 init_fwd_streams(void)
604 {
605         portid_t pid;
606         struct rte_port *port;
607         streamid_t sm_id, nb_fwd_streams_new;
608
609         /* set the socket id according to whether NUMA is enabled */
610         FOREACH_PORT(pid, ports) {
611                 port = &ports[pid];
612                 if (nb_rxq > port->dev_info.max_rx_queues) {
613                         printf("Fail: nb_rxq(%d) is greater than "
614                                 "max_rx_queues(%d)\n", nb_rxq,
615                                 port->dev_info.max_rx_queues);
616                         return -1;
617                 }
618                 if (nb_txq > port->dev_info.max_tx_queues) {
619                         printf("Fail: nb_txq(%d) is greater than "
620                                 "max_tx_queues(%d)\n", nb_txq,
621                                 port->dev_info.max_tx_queues);
622                         return -1;
623                 }
624                 if (numa_support) {
625                         if (port_numa[pid] != NUMA_NO_CONFIG)
626                                 port->socket_id = port_numa[pid];
627                         else {
628                                 port->socket_id = rte_eth_dev_socket_id(pid);
629
630                                 /* if socket_id is invalid, set to 0 */
631                                 if (check_socket_id(port->socket_id) < 0)
632                                         port->socket_id = 0;
633                         }
634                 }
635                 else {
636                         if (socket_num == UMA_NO_CONFIG)
637                                 port->socket_id = 0;
638                         else
639                                 port->socket_id = socket_num;
640                 }
641         }
642
643         nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
644         if (nb_fwd_streams_new == nb_fwd_streams)
645                 return 0;
646         /* clear the old */
647         if (fwd_streams != NULL) {
648                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
649                         if (fwd_streams[sm_id] == NULL)
650                                 continue;
651                         rte_free(fwd_streams[sm_id]);
652                         fwd_streams[sm_id] = NULL;
653                 }
654                 rte_free(fwd_streams);
655                 fwd_streams = NULL;
656         }
657
658         /* init new */
659         nb_fwd_streams = nb_fwd_streams_new;
660         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
661                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
662         if (fwd_streams == NULL)
663                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
664                                                 "failed\n", nb_fwd_streams);
665
666         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
667                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
668                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
669                 if (fwd_streams[sm_id] == NULL)
670                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
671                                                                 " failed\n");
672         }
673
674         return 0;
675 }
676
677 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
678 static void
679 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
680 {
681         unsigned int total_burst;
682         unsigned int nb_burst;
683         unsigned int burst_stats[3];
684         uint16_t pktnb_stats[3];
685         uint16_t nb_pkt;
686         int burst_percent[3];
687
688         /*
689          * First compute the total number of packet bursts and the
690          * two highest numbers of bursts of the same number of packets.
691          */
692         total_burst = 0;
693         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
694         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
695         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
696                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
697                 if (nb_burst == 0)
698                         continue;
699                 total_burst += nb_burst;
700                 if (nb_burst > burst_stats[0]) {
701                         burst_stats[1] = burst_stats[0];
702                         pktnb_stats[1] = pktnb_stats[0];
703                         burst_stats[0] = nb_burst;
704                         pktnb_stats[0] = nb_pkt;
705                 }
706         }
707         if (total_burst == 0)
708                 return;
709         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
710         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
711                burst_percent[0], (int) pktnb_stats[0]);
712         if (burst_stats[0] == total_burst) {
713                 printf("]\n");
714                 return;
715         }
716         if (burst_stats[0] + burst_stats[1] == total_burst) {
717                 printf(" + %d%% of %d pkts]\n",
718                        100 - burst_percent[0], pktnb_stats[1]);
719                 return;
720         }
721         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
722         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
723         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
724                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
725                 return;
726         }
727         printf(" + %d%% of %d pkts + %d%% of others]\n",
728                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
729 }
730 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
731
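/*
 * Display the forward statistics accumulated on one port, including the
 * optional per-queue statistics registers and XON/XOFF pause frame counters.
 */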
732 static void
733 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
734 {
735         struct rte_port *port;
736         uint8_t i;
737
738         static const char *fwd_stats_border = "----------------------";
739
740         port = &ports[port_id];
741         printf("\n  %s Forward statistics for port %-2d %s\n",
742                fwd_stats_border, port_id, fwd_stats_border);
743
744         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
745                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
746                        "%-"PRIu64"\n",
747                        stats->ipackets, stats->imissed,
748                        (uint64_t) (stats->ipackets + stats->imissed));
749
750                 if (cur_fwd_eng == &csum_fwd_engine)
751                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
752                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
753                 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
754                         printf("  RX-badcrc:  %-14"PRIu64" RX-badlen:  %-14"PRIu64
755                                "RX-error: %-"PRIu64"\n",
756                                stats->ibadcrc, stats->ibadlen, stats->ierrors);
757                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
758                 }
759
760                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
761                        "%-"PRIu64"\n",
762                        stats->opackets, port->tx_dropped,
763                        (uint64_t) (stats->opackets + port->tx_dropped));
764         }
765         else {
766                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
767                        "%14"PRIu64"\n",
768                        stats->ipackets, stats->imissed,
769                        (uint64_t) (stats->ipackets + stats->imissed));
770
771                 if (cur_fwd_eng == &csum_fwd_engine)
772                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
773                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
774                 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
775                         printf("  RX-badcrc:              %14"PRIu64"    RX-badlen: %14"PRIu64
776                                "    RX-error:%"PRIu64"\n",
777                                stats->ibadcrc, stats->ibadlen, stats->ierrors);
778                         printf("  RX-nombufs:             %14"PRIu64"\n",
779                                stats->rx_nombuf);
780                 }
781
782                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
783                        "%14"PRIu64"\n",
784                        stats->opackets, port->tx_dropped,
785                        (uint64_t) (stats->opackets + port->tx_dropped));
786         }
787
788         /* Display statistics of XON/XOFF pause frames, if any. */
789         if ((stats->tx_pause_xon  | stats->rx_pause_xon |
790              stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
791                 printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
792                        stats->rx_pause_xoff, stats->rx_pause_xon);
793                 printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
794                        stats->tx_pause_xoff, stats->tx_pause_xon);
795         }
796
797 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
798         if (port->rx_stream)
799                 pkt_burst_stats_display("RX",
800                         &port->rx_stream->rx_burst_stats);
801         if (port->tx_stream)
802                 pkt_burst_stats_display("TX",
803                         &port->tx_stream->tx_burst_stats);
804 #endif
805         /* stats fdir */
806         if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
807                 printf("  Fdirmiss:%14"PRIu64"    Fdirmatch:%14"PRIu64"\n",
808                        stats->fdirmiss,
809                        stats->fdirmatch);
810
811         if (port->rx_queue_stats_mapping_enabled) {
812                 printf("\n");
813                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
814                         printf("  Stats reg %2d RX-packets:%14"PRIu64
815                                "     RX-errors:%14"PRIu64
816                                "    RX-bytes:%14"PRIu64"\n",
817                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
818                 }
819                 printf("\n");
820         }
821         if (port->tx_queue_stats_mapping_enabled) {
822                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
823                         printf("  Stats reg %2d TX-packets:%14"PRIu64
824                                "                                 TX-bytes:%14"PRIu64"\n",
825                                i, stats->q_opackets[i], stats->q_obytes[i]);
826                 }
827         }
828
829         printf("  %s--------------------------------%s\n",
830                fwd_stats_border, fwd_stats_border);
831 }
832
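/*
 * Display the statistics of a single forwarding stream; streams with no
 * activity are skipped.
 */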
833 static void
834 fwd_stream_stats_display(streamid_t stream_id)
835 {
836         struct fwd_stream *fs;
837         static const char *fwd_top_stats_border = "-------";
838
839         fs = fwd_streams[stream_id];
840         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
841             (fs->fwd_dropped == 0))
842                 return;
843         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
844                "TX Port=%2d/Queue=%2d %s\n",
845                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
846                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
847         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
848                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
849
850         /* if checksum mode */
851         if (cur_fwd_eng == &csum_fwd_engine) {
852                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
853                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
854         }
855
856 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
857         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
858         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
859 #endif
860 }
861
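/*
 * Drain and free any packets pending in the RX queues of the forwarding
 * ports. The queues are polled twice, with a 10 ms pause in between.
 */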
862 static void
863 flush_fwd_rx_queues(void)
864 {
865         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
866         portid_t  rxp;
867         portid_t port_id;
868         queueid_t rxq;
869         uint16_t  nb_rx;
870         uint16_t  i;
871         uint8_t   j;
872
873         for (j = 0; j < 2; j++) {
874                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
875                         for (rxq = 0; rxq < nb_rxq; rxq++) {
876                                 port_id = fwd_ports_ids[rxp];
877                                 do {
878                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
879                                                 pkts_burst, MAX_PKT_BURST);
880                                         for (i = 0; i < nb_rx; i++)
881                                                 rte_pktmbuf_free(pkts_burst[i]);
882                                 } while (nb_rx > 0);
883                         }
884                 }
885                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
886         }
887 }
888
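/*
 * Run the given packet forwarding function on all the streams assigned to a
 * forwarding lcore until the lcore is told to stop.
 */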
889 static void
890 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
891 {
892         struct fwd_stream **fsm;
893         streamid_t nb_fs;
894         streamid_t sm_id;
895
896         fsm = &fwd_streams[fc->stream_idx];
897         nb_fs = fc->stream_nb;
898         do {
899                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
900                         (*pkt_fwd)(fsm[sm_id]);
901         } while (! fc->stopped);
902 }
903
904 static int
905 start_pkt_forward_on_core(void *fwd_arg)
906 {
907         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
908                              cur_fwd_config.fwd_eng->packet_fwd);
909         return 0;
910 }
911
912 /*
913  * Run the TXONLY packet forwarding engine to send a single burst of packets.
914  * Used to start communication flows in network loopback test configurations.
915  */
916 static int
917 run_one_txonly_burst_on_core(void *fwd_arg)
918 {
919         struct fwd_lcore *fwd_lc;
920         struct fwd_lcore tmp_lcore;
921
922         fwd_lc = (struct fwd_lcore *) fwd_arg;
923         tmp_lcore = *fwd_lc;
924         tmp_lcore.stopped = 1;
925         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
926         return 0;
927 }
928
929 /*
930  * Launch packet forwarding:
931  *     - Setup per-port forwarding context.
932  *     - launch logical cores with their forwarding configuration.
933  */
934 static void
935 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
936 {
937         port_fwd_begin_t port_fwd_begin;
938         unsigned int i;
939         unsigned int lc_id;
940         int diag;
941
942         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
943         if (port_fwd_begin != NULL) {
944                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
945                         (*port_fwd_begin)(fwd_ports_ids[i]);
946         }
947         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
948                 lc_id = fwd_lcores_cpuids[i];
949                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
950                         fwd_lcores[i]->stopped = 0;
951                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
952                                                      fwd_lcores[i], lc_id);
953                         if (diag != 0)
954                                 printf("launch lcore %u failed - diag=%d\n",
955                                        lc_id, diag);
956                 }
957         }
958 }
959
960 /*
961  * Start packet forwarding according to the current configuration.
962  */
963 void
964 start_packet_forwarding(int with_tx_first)
965 {
966         port_fwd_begin_t port_fwd_begin;
967         port_fwd_end_t  port_fwd_end;
968         struct rte_port *port;
969         unsigned int i;
970         portid_t   pt_id;
971         streamid_t sm_id;
972
973         if (all_ports_started() == 0) {
974                 printf("Not all ports were started\n");
975                 return;
976         }
977         if (test_done == 0) {
978                 printf("Packet forwarding already started\n");
979                 return;
980         }
981         if(dcb_test) {
982                 for (i = 0; i < nb_fwd_ports; i++) {
983                         pt_id = fwd_ports_ids[i];
984                         port = &ports[pt_id];
985                         if (!port->dcb_flag) {
986                                 printf("In DCB mode, all forwarding ports must "
987                                        "be configured in this mode.\n");
988                                 return;
989                         }
990                 }
991                 if (nb_fwd_lcores == 1) {
992                         printf("In DCB mode, the number of forwarding cores "
993                                "should be larger than 1.\n");
994                         return;
995                 }
996         }
997         test_done = 0;
998
999         if(!no_flush_rx)
1000                 flush_fwd_rx_queues();
1001
1002         fwd_config_setup();
1003         rxtx_config_display();
1004
1005         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1006                 pt_id = fwd_ports_ids[i];
1007                 port = &ports[pt_id];
1008                 rte_eth_stats_get(pt_id, &port->stats);
1009                 port->tx_dropped = 0;
1010
1011                 map_port_queue_stats_mapping_registers(pt_id, port);
1012         }
1013         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1014                 fwd_streams[sm_id]->rx_packets = 0;
1015                 fwd_streams[sm_id]->tx_packets = 0;
1016                 fwd_streams[sm_id]->fwd_dropped = 0;
1017                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1018                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1019
1020 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1021                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1022                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1023                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1024                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1025 #endif
1026 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1027                 fwd_streams[sm_id]->core_cycles = 0;
1028 #endif
1029         }
1030         if (with_tx_first) {
1031                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1032                 if (port_fwd_begin != NULL) {
1033                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1034                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1035                 }
1036                 launch_packet_forwarding(run_one_txonly_burst_on_core);
1037                 rte_eal_mp_wait_lcore();
1038                 port_fwd_end = tx_only_engine.port_fwd_end;
1039                 if (port_fwd_end != NULL) {
1040                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1041                                 (*port_fwd_end)(fwd_ports_ids[i]);
1042                 }
1043         }
1044         launch_packet_forwarding(start_pkt_forward_on_core);
1045 }
1046
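/*
 * Stop packet forwarding: signal the forwarding lcores to stop, wait for
 * them to finish, then collect and display the per-stream, per-port and
 * accumulated statistics.
 */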
1047 void
1048 stop_packet_forwarding(void)
1049 {
1050         struct rte_eth_stats stats;
1051         struct rte_port *port;
1052         port_fwd_end_t  port_fwd_end;
1053         int i;
1054         portid_t   pt_id;
1055         streamid_t sm_id;
1056         lcoreid_t  lc_id;
1057         uint64_t total_recv;
1058         uint64_t total_xmit;
1059         uint64_t total_rx_dropped;
1060         uint64_t total_tx_dropped;
1061         uint64_t total_rx_nombuf;
1062         uint64_t tx_dropped;
1063         uint64_t rx_bad_ip_csum;
1064         uint64_t rx_bad_l4_csum;
1065 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1066         uint64_t fwd_cycles;
1067 #endif
1068         static const char *acc_stats_border = "+++++++++++++++";
1069
1070         if (all_ports_started() == 0) {
1071                 printf("Not all ports were started\n");
1072                 return;
1073         }
1074         if (test_done) {
1075                 printf("Packet forwarding not started\n");
1076                 return;
1077         }
1078         printf("Telling cores to stop...");
1079         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1080                 fwd_lcores[lc_id]->stopped = 1;
1081         printf("\nWaiting for lcores to finish...\n");
1082         rte_eal_mp_wait_lcore();
1083         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1084         if (port_fwd_end != NULL) {
1085                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1086                         pt_id = fwd_ports_ids[i];
1087                         (*port_fwd_end)(pt_id);
1088                 }
1089         }
1090 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1091         fwd_cycles = 0;
1092 #endif
1093         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1094                 if (cur_fwd_config.nb_fwd_streams >
1095                     cur_fwd_config.nb_fwd_ports) {
1096                         fwd_stream_stats_display(sm_id);
1097                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1098                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1099                 } else {
1100                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1101                                 fwd_streams[sm_id];
1102                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1103                                 fwd_streams[sm_id];
1104                 }
1105                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1106                 tx_dropped = (uint64_t) (tx_dropped +
1107                                          fwd_streams[sm_id]->fwd_dropped);
1108                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1109
1110                 rx_bad_ip_csum =
1111                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1112                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1113                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1114                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1115                                                         rx_bad_ip_csum;
1116
1117                 rx_bad_l4_csum =
1118                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1119                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1120                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1121                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1122                                                         rx_bad_l4_csum;
1123
1124 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1125                 fwd_cycles = (uint64_t) (fwd_cycles +
1126                                          fwd_streams[sm_id]->core_cycles);
1127 #endif
1128         }
1129         total_recv = 0;
1130         total_xmit = 0;
1131         total_rx_dropped = 0;
1132         total_tx_dropped = 0;
1133         total_rx_nombuf  = 0;
1134         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1135                 pt_id = fwd_ports_ids[i];
1136
1137                 port = &ports[pt_id];
1138                 rte_eth_stats_get(pt_id, &stats);
1139                 stats.ipackets -= port->stats.ipackets;
1140                 port->stats.ipackets = 0;
1141                 stats.opackets -= port->stats.opackets;
1142                 port->stats.opackets = 0;
1143                 stats.ibytes   -= port->stats.ibytes;
1144                 port->stats.ibytes = 0;
1145                 stats.obytes   -= port->stats.obytes;
1146                 port->stats.obytes = 0;
1147                 stats.imissed  -= port->stats.imissed;
1148                 port->stats.imissed = 0;
1149                 stats.oerrors  -= port->stats.oerrors;
1150                 port->stats.oerrors = 0;
1151                 stats.rx_nombuf -= port->stats.rx_nombuf;
1152                 port->stats.rx_nombuf = 0;
1153                 stats.fdirmatch -= port->stats.fdirmatch;
1154                 port->stats.fdirmatch = 0;
1155                 stats.fdirmiss -= port->stats.fdirmiss;
1156                 port->stats.fdirmiss = 0;
1157
1158                 total_recv += stats.ipackets;
1159                 total_xmit += stats.opackets;
1160                 total_rx_dropped += stats.imissed;
1161                 total_tx_dropped += port->tx_dropped;
1162                 total_rx_nombuf  += stats.rx_nombuf;
1163
1164                 fwd_port_stats_display(pt_id, &stats);
1165         }
1166         printf("\n  %s Accumulated forward statistics for all ports"
1167                "%s\n",
1168                acc_stats_border, acc_stats_border);
1169         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1170                "%-"PRIu64"\n"
1171                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1172                "%-"PRIu64"\n",
1173                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1174                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1175         if (total_rx_nombuf > 0)
1176                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1177         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1178                "%s\n",
1179                acc_stats_border, acc_stats_border);
1180 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1181         if (total_recv > 0)
1182                 printf("\n  CPU cycles/packet=%u (total cycles="
1183                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1184                        (unsigned int)(fwd_cycles / total_recv),
1185                        fwd_cycles, total_recv);
1186 #endif
1187         printf("\nDone.\n");
1188         test_done = 1;
1189 }
1190
1191 void
1192 dev_set_link_up(portid_t pid)
1193 {
1194         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1195                 printf("\nSet link up failed.\n");
1196 }
1197
1198 void
1199 dev_set_link_down(portid_t pid)
1200 {
1201         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1202                 printf("\nSet link down failed.\n");
1203 }
1204
1205 static int
1206 all_ports_started(void)
1207 {
1208         portid_t pi;
1209         struct rte_port *port;
1210
1211         FOREACH_PORT(pi, ports) {
1212                 port = &ports[pi];
1213                 /* Check if there is a port which is not started */
1214                 if ((port->port_status != RTE_PORT_STARTED) &&
1215                         (port->slave_flag == 0))
1216                         return 0;
1217         }
1218
1219         /* All ports are started */
1220         return 1;
1221 }
1222
1223 int
1224 all_ports_stopped(void)
1225 {
1226         portid_t pi;
1227         struct rte_port *port;
1228
1229         FOREACH_PORT(pi, ports) {
1230                 port = &ports[pi];
1231                 if ((port->port_status != RTE_PORT_STOPPED) &&
1232                         (port->slave_flag == 0))
1233                         return 0;
1234         }
1235
1236         return 1;
1237 }
1238
1239 int
1240 port_is_started(portid_t port_id)
1241 {
1242         if (port_id_is_invalid(port_id, ENABLED_WARN))
1243                 return 0;
1244
1245         if (ports[port_id].port_status != RTE_PORT_STARTED)
1246                 return 0;
1247
1248         return 1;
1249 }
1250
1251 static int
1252 port_is_closed(portid_t port_id)
1253 {
1254         if (port_id_is_invalid(port_id, ENABLED_WARN))
1255                 return 0;
1256
1257         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1258                 return 0;
1259
1260         return 1;
1261 }
1262
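/*
 * Start one port, or all ports when pid is RTE_PORT_ALL: configure the device
 * and its RX/TX queues if needed, start it and optionally check the link
 * status. Return 0 on success, -1 on error.
 */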
1263 int
1264 start_port(portid_t pid)
1265 {
1266         int diag, need_check_link_status = -1;
1267         portid_t pi;
1268         queueid_t qi;
1269         struct rte_port *port;
1270         struct ether_addr mac_addr;
1271
1272         if (test_done == 0) {
1273                 printf("Please stop forwarding first\n");
1274                 return -1;
1275         }
1276
1277         if (port_id_is_invalid(pid, ENABLED_WARN))
1278                 return 0;
1279
1280         if (init_fwd_streams() < 0) {
1281                 printf("Fail from init_fwd_streams()\n");
1282                 return -1;
1283         }
1284
1285         if(dcb_config)
1286                 dcb_test = 1;
1287         FOREACH_PORT(pi, ports) {
1288                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1289                         continue;
1290
1291                 need_check_link_status = 0;
1292                 port = &ports[pi];
1293                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1294                                                  RTE_PORT_HANDLING) == 0) {
1295                         printf("Port %d is not stopped\n", pi);
1296                         continue;
1297                 }
1298
1299                 if (port->need_reconfig > 0) {
1300                         port->need_reconfig = 0;
1301
1302                         printf("Configuring Port %d (socket %u)\n", pi,
1303                                         port->socket_id);
1304                         /* configure port */
1305                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1306                                                 &(port->dev_conf));
1307                         if (diag != 0) {
1308                                 if (rte_atomic16_cmpset(&(port->port_status),
1309                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1310                                         printf("Port %d can not be set back "
1311                                                         "to stopped\n", pi);
1312                                 printf("Fail to configure port %d\n", pi);
1313                                 /* try to reconfigure port next time */
1314                                 port->need_reconfig = 1;
1315                                 return -1;
1316                         }
1317                 }
1318                 if (port->need_reconfig_queues > 0) {
1319                         port->need_reconfig_queues = 0;
1320                         /* setup tx queues */
1321                         for (qi = 0; qi < nb_txq; qi++) {
1322                                 if ((numa_support) &&
1323                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1324                                         diag = rte_eth_tx_queue_setup(pi, qi,
1325                                                 nb_txd,txring_numa[pi],
1326                                                 &(port->tx_conf));
1327                                 else
1328                                         diag = rte_eth_tx_queue_setup(pi, qi,
1329                                                 nb_txd,port->socket_id,
1330                                                 &(port->tx_conf));
1331
1332                                 if (diag == 0)
1333                                         continue;
1334
1335                                 /* Fail to setup tx queue, return */
1336                                 if (rte_atomic16_cmpset(&(port->port_status),
1337                                                         RTE_PORT_HANDLING,
1338                                                         RTE_PORT_STOPPED) == 0)
1339                                         printf("Port %d can not be set back "
1340                                                         "to stopped\n", pi);
1341                                 printf("Fail to configure port %d tx queues\n", pi);
1342                                 /* try to reconfigure queues next time */
1343                                 port->need_reconfig_queues = 1;
1344                                 return -1;
1345                         }
1346                         /* setup rx queues */
1347                         for (qi = 0; qi < nb_rxq; qi++) {
1348                                 if ((numa_support) &&
1349                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1350                                         struct rte_mempool * mp =
1351                                                 mbuf_pool_find(rxring_numa[pi]);
1352                                         if (mp == NULL) {
1353                                                 printf("Failed to setup RX queue: "
1354                                                         "no mempool allocation "
1355                                                         "on the socket %d\n",
1356                                                         rxring_numa[pi]);
1357                                                 return -1;
1358                                         }
1359
1360                                         diag = rte_eth_rx_queue_setup(pi, qi,
1361                                              nb_rxd,rxring_numa[pi],
1362                                              &(port->rx_conf),mp);
1363                                 }
1364                                 else
1365                                         diag = rte_eth_rx_queue_setup(pi, qi,
1366                                              nb_rxd,port->socket_id,
1367                                              &(port->rx_conf),
1368                                              mbuf_pool_find(port->socket_id));
1369
1370                                 if (diag == 0)
1371                                         continue;
1372
1373
1374                                 /* Fail to setup rx queue, return */
1375                                 if (rte_atomic16_cmpset(&(port->port_status),
1376                                                         RTE_PORT_HANDLING,
1377                                                         RTE_PORT_STOPPED) == 0)
1378                                         printf("Port %d can not be set back "
1379                                                         "to stopped\n", pi);
1380                                 printf("Fail to configure port %d rx queues\n", pi);
1381                                 /* try to reconfigure queues next time */
1382                                 port->need_reconfig_queues = 1;
1383                                 return -1;
1384                         }
1385                 }
1386                 /* start port */
1387                 if (rte_eth_dev_start(pi) < 0) {
1388                         printf("Fail to start port %d\n", pi);
1389
1390                         /* Failed to start the port; roll its status back */
1391                         if (rte_atomic16_cmpset(&(port->port_status),
1392                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1393                                 printf("Port %d can not be set back to "
1394                                                         "stopped\n", pi);
1395                         continue;
1396                 }
1397
1398                 if (rte_atomic16_cmpset(&(port->port_status),
1399                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1400                         printf("Port %d cannot be set to started\n", pi);
1401
1402                 rte_eth_macaddr_get(pi, &mac_addr);
1403                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1404                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1405                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1406                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1407
1408                 /* at least one port started, need checking link status */
1409                 need_check_link_status = 1;
1410         }
1411
1412         if (need_check_link_status == 1 && !no_link_check)
1413                 check_all_ports_link_status(RTE_PORT_ALL);
1414         else if (need_check_link_status == 0)
1415                 printf("Please stop the ports first\n");
1416
1417         printf("Done\n");
1418         return 0;
1419 }
1420
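/*
 * Stop the given port, or all ports when pid is RTE_PORT_ALL.
 * Forwarding must be stopped first; each matching port is moved from
 * the STARTED to the STOPPED state around the call to rte_eth_dev_stop().
 */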
1421 void
1422 stop_port(portid_t pid)
1423 {
1424         portid_t pi;
1425         struct rte_port *port;
1426         int need_check_link_status = 0;
1427
1428         if (test_done == 0) {
1429                 printf("Please stop forwarding first\n");
1430                 return;
1431         }
1432         if (dcb_test) {
1433                 dcb_test = 0;
1434                 dcb_config = 0;
1435         }
1436
1437         if (port_id_is_invalid(pid, ENABLED_WARN))
1438                 return;
1439
1440         printf("Stopping ports...\n");
1441
1442         FOREACH_PORT(pi, ports) {
1443                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1444                         continue;
1445
1446                 port = &ports[pi];
1447                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1448                                                 RTE_PORT_HANDLING) == 0)
1449                         continue;
1450
1451                 rte_eth_dev_stop(pi);
1452
1453                 if (rte_atomic16_cmpset(&(port->port_status),
1454                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1455                         printf("Port %d cannot be set to stopped\n", pi);
1456                 need_check_link_status = 1;
1457         }
1458         if (need_check_link_status && !no_link_check)
1459                 check_all_ports_link_status(RTE_PORT_ALL);
1460
1461         printf("Done\n");
1462 }
1463
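/*
 * Close the given port, or all ports when pid is RTE_PORT_ALL.
 * Only ports in the STOPPED state are closed; each one is moved to the
 * CLOSED state after rte_eth_dev_close() returns.
 */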
1464 void
1465 close_port(portid_t pid)
1466 {
1467         portid_t pi;
1468         struct rte_port *port;
1469
1470         if (test_done == 0) {
1471                 printf("Please stop forwarding first\n");
1472                 return;
1473         }
1474
1475         if (port_id_is_invalid(pid, ENABLED_WARN))
1476                 return;
1477
1478         printf("Closing ports...\n");
1479
1480         FOREACH_PORT(pi, ports) {
1481                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1482                         continue;
1483
1484                 port = &ports[pi];
1485                 if (rte_atomic16_cmpset(&(port->port_status),
1486                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1487                         printf("Port %d is already closed\n", pi);
1488                         continue;
1489                 }
1490
1491                 if (rte_atomic16_cmpset(&(port->port_status),
1492                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1493                         printf("Port %d is not stopped, cannot close it\n", pi);
1494                         continue;
1495                 }
1496
1497                 rte_eth_dev_close(pi);
1498
1499                 if (rte_atomic16_cmpset(&(port->port_status),
1500                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1501                         printf("Port %d cannot be set to closed\n", pi);
1502         }
1503
1504         printf("Done\n");
1505 }
1506
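/*
 * Hotplug a new port from the given device identifier, enable it,
 * reconfigure it on its NUMA socket and rebuild the forwarding
 * port list.
 */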
1507 void
1508 attach_port(char *identifier)
1509 {
1510         portid_t i, j, pi = 0;
1511
1512         printf("Attaching a new port...\n");
1513
1514         if (identifier == NULL) {
1515                 printf("Invalid parameter specified\n");
1516                 return;
1517         }
1518
1519         if (test_done == 0) {
1520                 printf("Please stop forwarding first\n");
1521                 return;
1522         }
1523
1524         if (rte_eth_dev_attach(identifier, &pi))
1525                 return;
1526
1527         ports[pi].enabled = 1;
1528         reconfig(pi, rte_eth_dev_socket_id(pi));
1529         rte_eth_promiscuous_enable(pi);
1530
1531         nb_ports = rte_eth_dev_count();
1532
1533         /* set_default_fwd_ports_config(); */
1534         bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
1535         i = 0;
1536         FOREACH_PORT(j, ports) {
1537                 fwd_ports_ids[i] = j;
1538                 i++;
1539         }
1540         nb_cfg_ports = nb_ports;
1541         nb_fwd_ports++;
1542
1543         ports[pi].port_status = RTE_PORT_STOPPED;
1544
1545         printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1546         printf("Done\n");
1547 }
1548
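/*
 * Detach (hot-unplug) a closed port and rebuild the forwarding
 * port list. The port must be closed before it can be detached.
 */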
1549 void
1550 detach_port(uint8_t port_id)
1551 {
1552         portid_t i, pi = 0;
1553         char name[RTE_ETH_NAME_MAX_LEN];
1554
1555         printf("Detaching a port...\n");
1556
1557         if (!port_is_closed(port_id)) {
1558                 printf("Please close port first\n");
1559                 return;
1560         }
1561
1562         if (rte_eth_dev_detach(port_id, name))
1563                 return;
1564
1565         ports[port_id].enabled = 0;
1566         nb_ports = rte_eth_dev_count();
1567
1568         /* set_default_fwd_ports_config(); */
1569         bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
1570         i = 0;
1571         FOREACH_PORT(pi, ports) {
1572                 fwd_ports_ids[i] = pi;
1573                 i++;
1574         }
1575         nb_cfg_ports = nb_ports;
1576         nb_fwd_ports--;
1577
1578         printf("Port '%s' is detached. Total number of ports is now %d\n",
1579                         name, nb_ports);
1580         printf("Done\n");
1581         return;
1582 }
1583
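/* Stop packet forwarding if it is still running, then close all ports before exiting. */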
1584 void
1585 pmd_test_exit(void)
1586 {
1587         portid_t pt_id;
1588
1589         if (test_done == 0)
1590                 stop_packet_forwarding();
1591
1592         FOREACH_PORT(pt_id, ports) {
1593                 printf("Stopping port %d...", pt_id);
1594                 fflush(stdout);
1595                 rte_eth_dev_close(pt_id);
1596                 printf("done\n");
1597         }
1598         printf("bye...\n");
1599 }
1600
1601 typedef void (*cmd_func_t)(void);
1602 struct pmd_test_command {
1603         const char *cmd_name;
1604         cmd_func_t cmd_func;
1605 };
1606
1607 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1608
1609 /* Check the link status of all ports for up to 9 s, then print the result for each port */
1610 static void
1611 check_all_ports_link_status(uint32_t port_mask)
1612 {
1613 #define CHECK_INTERVAL 100 /* 100ms */
1614 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1615         uint8_t portid, count, all_ports_up, print_flag = 0;
1616         struct rte_eth_link link;
1617
1618         printf("Checking link statuses...\n");
1619         fflush(stdout);
1620         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1621                 all_ports_up = 1;
1622                 FOREACH_PORT(portid, ports) {
1623                         if ((port_mask & (1 << portid)) == 0)
1624                                 continue;
1625                         memset(&link, 0, sizeof(link));
1626                         rte_eth_link_get_nowait(portid, &link);
1627                         /* print link status if flag set */
1628                         if (print_flag == 1) {
1629                                 if (link.link_status)
1630                                         printf("Port %d Link Up - speed %u "
1631                                                 "Mbps - %s\n", (uint8_t)portid,
1632                                                 (unsigned)link.link_speed,
1633                                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1634                                                 ("full-duplex") : ("half-duplex"));
1635                                 else
1636                                         printf("Port %d Link Down\n",
1637                                                 (uint8_t)portid);
1638                                 continue;
1639                         }
1640                         /* clear all_ports_up flag if any link down */
1641                         if (link.link_status == 0) {
1642                                 all_ports_up = 0;
1643                                 break;
1644                         }
1645                 }
1646                 /* after finally printing all link status, get out */
1647                 if (print_flag == 1)
1648                         break;
1649
1650                 if (all_ports_up == 0) {
1651                         fflush(stdout);
1652                         rte_delay_ms(CHECK_INTERVAL);
1653                 }
1654
1655                 /* set the print_flag if all ports up or timeout */
1656                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1657                         print_flag = 1;
1658                 }
1659         }
1660 }
1661
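/*
 * Apply the per-queue TX statistics counter mappings given on the
 * command line to the specified port. Returns 0 on success or the
 * error reported by rte_eth_dev_set_tx_queue_stats_mapping().
 */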
1662 static int
1663 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1664 {
1665         uint16_t i;
1666         int diag;
1667         uint8_t mapping_found = 0;
1668
1669         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1670                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1671                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1672                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1673                                         tx_queue_stats_mappings[i].queue_id,
1674                                         tx_queue_stats_mappings[i].stats_counter_id);
1675                         if (diag != 0)
1676                                 return diag;
1677                         mapping_found = 1;
1678                 }
1679         }
1680         if (mapping_found)
1681                 port->tx_queue_stats_mapping_enabled = 1;
1682         return 0;
1683 }
1684
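/*
 * Apply the per-queue RX statistics counter mappings given on the
 * command line to the specified port. Returns 0 on success or the
 * error reported by rte_eth_dev_set_rx_queue_stats_mapping().
 */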
1685 static int
1686 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1687 {
1688         uint16_t i;
1689         int diag;
1690         uint8_t mapping_found = 0;
1691
1692         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1693                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1694                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1695                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1696                                         rx_queue_stats_mappings[i].queue_id,
1697                                         rx_queue_stats_mappings[i].stats_counter_id);
1698                         if (diag != 0)
1699                                 return diag;
1700                         mapping_found = 1;
1701                 }
1702         }
1703         if (mapping_found)
1704                 port->rx_queue_stats_mapping_enabled = 1;
1705         return 0;
1706 }
1707
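/*
 * Program the TX and RX queue statistics mappings of a port, treating
 * -ENOTSUP as a soft failure (the feature is simply disabled) and
 * aborting on any other error.
 */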
1708 static void
1709 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1710 {
1711         int diag = 0;
1712
1713         diag = set_tx_queue_stats_mapping_registers(pi, port);
1714         if (diag != 0) {
1715                 if (diag == -ENOTSUP) {
1716                         port->tx_queue_stats_mapping_enabled = 0;
1717                         printf("TX queue stats mapping not supported for port id=%d\n", pi);
1718                 }
1719                 else
1720                         rte_exit(EXIT_FAILURE,
1721                                         "set_tx_queue_stats_mapping_registers "
1722                                         "failed for port id=%d diag=%d\n",
1723                                         pi, diag);
1724         }
1725
1726         diag = set_rx_queue_stats_mapping_registers(pi, port);
1727         if (diag != 0) {
1728                 if (diag == -ENOTSUP) {
1729                         port->rx_queue_stats_mapping_enabled = 0;
1730                         printf("RX queue stats mapping not supported for port id=%d\n", pi);
1731                 }
1732                 else
1733                         rte_exit(EXIT_FAILURE,
1734                                         "set_rx_queue_stats_mapping_registers "
1735                                         "failed for port id=%d diag=%d\n",
1736                                         pi, diag);
1737         }
1738 }
1739
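/*
 * Initialize the RX/TX queue configuration of a port from the PMD
 * defaults, then override any threshold that was set on the command
 * line (any value different from RTE_PMD_PARAM_UNSET).
 */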
1740 static void
1741 rxtx_port_config(struct rte_port *port)
1742 {
1743         port->rx_conf = port->dev_info.default_rxconf;
1744         port->tx_conf = port->dev_info.default_txconf;
1745
1746         /* Check if any RX/TX parameters have been passed */
1747         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1748                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1749
1750         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1751                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1752
1753         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1754                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1755
1756         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1757                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1758
1759         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1760                 port->rx_conf.rx_drop_en = rx_drop_en;
1761
1762         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1763                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1764
1765         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1766                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1767
1768         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1769                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1770
1771         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1772                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1773
1774         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1775                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1776
1777         if (txq_flags != RTE_PMD_PARAM_UNSET)
1778                 port->tx_conf.txq_flags = txq_flags;
1779 }
1780
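/*
 * Set the default device configuration of every enabled port:
 * RX mode, flow director, RSS (when several RX queues are used),
 * VMDq RSS when VFs are present, and the RX/TX queue parameters.
 */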
1781 void
1782 init_port_config(void)
1783 {
1784         portid_t pid;
1785         struct rte_port *port;
1786
1787         FOREACH_PORT(pid, ports) {
1788                 port = &ports[pid];
1789                 port->dev_conf.rxmode = rx_mode;
1790                 port->dev_conf.fdir_conf = fdir_conf;
1791                 if (nb_rxq > 1) {
1792                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1793                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1794                 } else {
1795                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1796                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1797                 }
1798
1799                 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1800                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1801                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1802                         else
1803                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1804                 }
1805
1806                 if (port->dev_info.max_vfs != 0) {
1807                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1808                                 port->dev_conf.rxmode.mq_mode =
1809                                         ETH_MQ_RX_VMDQ_RSS;
1810                         else
1811                                 port->dev_conf.rxmode.mq_mode =
1812                                         ETH_MQ_RX_NONE;
1813
1814                         port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1815                 }
1816
1817                 rxtx_port_config(port);
1818
1819                 rte_eth_macaddr_get(pid, &port->eth_addr);
1820
1821                 map_port_queue_stats_mapping_registers(pid, port);
1822 #ifdef RTE_NIC_BYPASS
1823                 rte_eth_dev_bypass_init(pid);
1824 #endif
1825         }
1826 }
1827
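/* Mark the given port as a slave (bonded) port. */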
1828 void set_port_slave_flag(portid_t slave_pid)
1829 {
1830         struct rte_port *port;
1831
1832         port = &ports[slave_pid];
1833         port->slave_flag = 1;
1834 }
1835
1836 void clear_port_slave_flag(portid_t slave_pid)
1837 {
1838         struct rte_port *port;
1839
1840         port = &ports[slave_pid];
1841         port->slave_flag = 0;
1842 }
1843
1844 const uint16_t vlan_tags[] = {
1845                 0,  1,  2,  3,  4,  5,  6,  7,
1846                 8,  9, 10, 11,  12, 13, 14, 15,
1847                 16, 17, 18, 19, 20, 21, 22, 23,
1848                 24, 25, 26, 27, 28, 29, 30, 31
1849 };
1850
1851 static int
1852 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1853 {
1854         uint8_t i;
1855
1856         /*
1857          * Builds up the correct configuration for dcb+vt based on the vlan tags array
1858          * given above, and the number of traffic classes available for use.
1859          */
1860         if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1861                 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1862                 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1863
1864                 /* VMDQ+DCB RX and TX configurations */
1865                 vmdq_rx_conf.enable_default_pool = 0;
1866                 vmdq_rx_conf.default_pool = 0;
1867                 vmdq_rx_conf.nb_queue_pools =
1868                         (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1869                 vmdq_tx_conf.nb_queue_pools =
1870                         (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1871
1872                 vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1873                 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1874                         vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
1875                         vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1876                 }
1877                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1878                         vmdq_rx_conf.dcb_queue[i] = i;
1879                         vmdq_tx_conf.dcb_queue[i] = i;
1880                 }
1881
1882                 /* Set the DCB mode of RX and TX of multiple queues */
1883                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1884                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1885                 if (dcb_conf->pfc_en)
1886                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1887                 else
1888                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1889
1890                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1891                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1892                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1893                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1894         }
1895         else {
1896                 struct rte_eth_dcb_rx_conf rx_conf;
1897                 struct rte_eth_dcb_tx_conf tx_conf;
1898
1899                 /* queue mapping configuration of DCB RX and TX */
1900                 if (dcb_conf->num_tcs == ETH_4_TCS)
1901                         dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1902                 else
1903                         dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1904
1905                 rx_conf.nb_tcs = dcb_conf->num_tcs;
1906                 tx_conf.nb_tcs = dcb_conf->num_tcs;
1907
1908                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1909                         rx_conf.dcb_queue[i] = i;
1910                         tx_conf.dcb_queue[i] = i;
1911                 }
1912                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1913                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1914                 if (dcb_conf->pfc_en)
1915                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1916                 else
1917                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1918
1919                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1920                                 sizeof(struct rte_eth_dcb_rx_conf)));
1921                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1922                                 sizeof(struct rte_eth_dcb_tx_conf)));
1923         }
1924
1925         return 0;
1926 }
1927
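/*
 * Configure a port for DCB testing: build the DCB (or DCB+VT) device
 * configuration, enable VLAN filtering for the test VLAN tags and flag
 * the port as DCB-enabled.
 */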
1928 int
1929 init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
1930 {
1931         struct rte_eth_conf port_conf;
1932         struct rte_port *rte_port;
1933         int retval;
1934         uint16_t nb_vlan;
1935         uint16_t i;
1936
1937         /* rxq and txq configuration in dcb mode */
1938         nb_rxq = 128;
1939         nb_txq = 128;
1940         rx_free_thresh = 64;
1941
1942         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1943         /* Enter DCB configuration status */
1944         dcb_config = 1;
1945
1946         nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1947         /* Set the configuration of DCB in VT mode and DCB in non-VT mode */
1948         retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1949         if (retval < 0)
1950                 return retval;
1951
1952         rte_port = &ports[pid];
1953         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1954
1955         rxtx_port_config(rte_port);
1956         /* VLAN filter */
1957         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1958         for (i = 0; i < nb_vlan; i++) {
1959                 rx_vft_set(pid, vlan_tags[i], 1);
1960         }
1961
1962         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1963         map_port_queue_stats_mapping_registers(pid, rte_port);
1964
1965         rte_port->dcb_flag = 1;
1966
1967         return 0;
1968 }
1969
1970 static void
1971 init_port(void)
1972 {
1973         portid_t pid;
1974
1975         /* Configuration of Ethernet ports. */
1976         ports = rte_zmalloc("testpmd: ports",
1977                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
1978                             RTE_CACHE_LINE_SIZE);
1979         if (ports == NULL) {
1980                 rte_exit(EXIT_FAILURE,
1981                                 "rte_zmalloc(%d struct rte_port) failed\n",
1982                                 RTE_MAX_ETHPORTS);
1983         }
1984
1985         /* enable all allocated ports */
1986         for (pid = 0; pid < nb_ports; pid++)
1987                 ports[pid].enabled = 1;
1988 }
1989
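/*
 * Entry point: initialize the EAL, probe the ethernet devices, parse the
 * application arguments, configure and start all ports, then either run
 * the interactive command line or start forwarding until the user
 * presses enter.
 */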
1990 int
1991 main(int argc, char** argv)
1992 {
1993         int  diag;
1994         uint8_t port_id;
1995
1996         diag = rte_eal_init(argc, argv);
1997         if (diag < 0)
1998                 rte_panic("Cannot init EAL\n");
1999
2000         nb_ports = (portid_t) rte_eth_dev_count();
2001         if (nb_ports == 0)
2002                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2003
2004         /* allocate port structures, and init them */
2005         init_port();
2006
2007         set_def_fwd_config();
2008         if (nb_lcores == 0)
2009                 rte_panic("Empty set of forwarding logical cores - check the "
2010                           "core mask supplied in the command parameters\n");
2011
2012         argc -= diag;
2013         argv += diag;
2014         if (argc > 1)
2015                 launch_args_parse(argc, argv);
2016
2017         if (nb_rxq > nb_txq)
2018                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2019                        "but nb_txq=%d will prevent it from being fully tested.\n",
2020                        nb_rxq, nb_txq);
2021
2022         init_config();
2023         if (start_port(RTE_PORT_ALL) != 0)
2024                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2025
2026         /* set all ports to promiscuous mode by default */
2027         FOREACH_PORT(port_id, ports)
2028                 rte_eth_promiscuous_enable(port_id);
2029
2030 #ifdef RTE_LIBRTE_CMDLINE
2031         if (interactive == 1) {
2032                 if (auto_start) {
2033                         printf("Start automatic packet forwarding\n");
2034                         start_packet_forwarding(0);
2035                 }
2036                 prompt();
2037         } else
2038 #endif
2039         {
2040                 char c;
2041                 int rc;
2042
2043                 printf("No command-line core given, starting packet forwarding\n");
2044                 start_packet_forwarding(0);
2045                 printf("Press enter to exit\n");
2046                 rc = read(0, &c, 1);
2047                 if (rc < 0)
2048                         return 1;
2049         }
2050
2051         return 0;
2052 }