386bf84d30099ec9545d73c4c8daf83e3a402498
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_eal.h>
61 #include <rte_per_lcore.h>
62 #include <rte_lcore.h>
63 #include <rte_atomic.h>
64 #include <rte_branch_prediction.h>
65 #include <rte_ring.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
68 #include <rte_mbuf.h>
69 #include <rte_interrupts.h>
70 #include <rte_pci.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
73 #include <rte_dev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81
82 uint16_t verbose_level = 0; /**< Silent by default. */
83
84 /* Use the master core for the command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
87
88 /*
89  * NUMA support configuration.
90  * When enabled, the allocation of the RX and TX memory rings and of the
91  * DMA memory buffers (mbufs) of the probed ports is dispatched among
92  * CPU sockets 0 and 1.
93  * Otherwise, all memory is allocated from CPU socket 0.
94  */
95 uint8_t numa_support = 0; /**< No numa support by default */
96
97 /*
98  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99  * not configured.
100  */
101 uint8_t socket_num = UMA_NO_CONFIG;
102
103 /*
104  * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
105  */
106 uint8_t mp_anon = 0;
107
108 /*
109  * Record the Ethernet address of peer target ports to which packets are
110  * forwarded.
111  * Must be instantiated with the Ethernet addresses of peer traffic generator
112  * ports.
113  */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
116
117 /*
118  * Probed Target Environment.
119  */
120 struct rte_port *ports;        /**< For all probed ethernet ports. */
121 portid_t nb_ports;             /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124
125 /*
126  * Test Forwarding Configuration.
127  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129  */
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
134
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
140
141 /*
142  * Forwarding engines.
143  */
144 struct fwd_engine * fwd_engines[] = {
145         &io_fwd_engine,
146         &mac_fwd_engine,
147         &mac_retry_fwd_engine,
148         &mac_swap_engine,
149         &flow_gen_engine,
150         &rx_only_engine,
151         &tx_only_engine,
152         &csum_fwd_engine,
153         &icmp_echo_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155         &ieee1588_fwd_engine,
156 #endif
157         NULL,
158 };
159
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
162
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
165                                       * specified on command-line. */
166
167 /*
168  * Configuration of packet segments used by the "txonly" processing engine.
169  */
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172         TXONLY_DEF_PACKET_LEN,
173 };
174 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
175
176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
177 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
178
179 /* Whether the current configuration is in DCB mode; 0 means it is not. */
180 uint8_t dcb_config = 0;
181
182 /* Whether DCB is in testing status */
183 uint8_t dcb_test = 0;
184
185 /* The default queue mapping mode is DCB on with VT on */
186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
187
188 /*
189  * Configurable number of RX/TX queues.
190  */
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193
194 /*
195  * Configurable number of RX/TX ring descriptors.
196  */
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
201
202 #define RTE_PMD_PARAM_UNSET -1
203 /*
204  * Configurable values of RX and TX ring threshold registers.
205  */
206
207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
210
211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
214
215 /*
216  * Configurable value of RX free threshold.
217  */
218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
219
220 /*
221  * Configurable value of RX drop enable.
222  */
223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
224
225 /*
226  * Configurable value of TX free threshold.
227  */
228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
229
230 /*
231  * Configurable value of TX RS bit threshold.
232  */
233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
234
235 /*
236  * Configurable value of TX queue flags.
237  */
238 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
239
240 /*
241  * Receive Side Scaling (RSS) configuration.
242  */
243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
244
245 /*
246  * Port topology configuration
247  */
248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
249
250 /*
251  * Avoid flushing all the RX streams before starting forwarding.
252  */
253 uint8_t no_flush_rx = 0; /* flush by default */
254
255 /*
256  * Avoid checking the link status when starting/stopping a port.
257  */
258 uint8_t no_link_check = 0; /* check by default */
259
260 /*
261  * NIC bypass mode configuration options.
262  */
263 #ifdef RTE_NIC_BYPASS
264
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
267
268 #endif
269
270 /*
271  * Ethernet device configuration.
272  */
273 struct rte_eth_rxmode rx_mode = {
274         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
275         .split_hdr_size = 0,
276         .header_split   = 0, /**< Header Split disabled. */
277         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
280         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
282         .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
283 };
284
285 struct rte_fdir_conf fdir_conf = {
286         .mode = RTE_FDIR_MODE_NONE,
287         .pballoc = RTE_FDIR_PBALLOC_64K,
288         .status = RTE_FDIR_REPORT_STATUS,
289         .mask = {
290                 .vlan_tci_mask = 0x0,
291                 .ipv4_mask     = {
292                         .src_ip = 0xFFFFFFFF,
293                         .dst_ip = 0xFFFFFFFF,
294                 },
295                 .ipv6_mask     = {
296                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
297                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
298                 },
299                 .src_port_mask = 0xFFFF,
300                 .dst_port_mask = 0xFFFF,
301         },
302         .drop_queue = 127,
303 };
304
305 volatile int test_done = 1; /* nonzero when packet forwarding is stopped. */
306
307 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
308 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
309
310 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
311 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
312
313 uint16_t nb_tx_queue_stats_mappings = 0;
314 uint16_t nb_rx_queue_stats_mappings = 0;
315
316 /* Forward function declarations */
317 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
318 static void check_all_ports_link_status(uint32_t port_mask);
319
320 /*
321  * Check if all the ports are started.
322  * If yes, return positive value. If not, return zero.
323  */
324 static int all_ports_started(void);
325
326 /*
327  * Find next enabled port
328  */
329 portid_t
330 find_next_port(portid_t p, struct rte_port *ports, int size)
331 {
332         if (ports == NULL)
333                 rte_exit(-EINVAL, "failed to find a next port id\n");
334
335         while ((p < size) && (ports[p].enabled == 0))
336                 p++;
337         return p;
338 }
339
340 /*
341  * Setup default configuration.
342  */
343 static void
344 set_default_fwd_lcores_config(void)
345 {
346         unsigned int i;
347         unsigned int nb_lc;
348
349         nb_lc = 0;
350         for (i = 0; i < RTE_MAX_LCORE; i++) {
351                 if (! rte_lcore_is_enabled(i))
352                         continue;
353                 if (i == rte_get_master_lcore())
354                         continue;
355                 fwd_lcores_cpuids[nb_lc++] = i;
356         }
357         nb_lcores = (lcoreid_t) nb_lc;
358         nb_cfg_lcores = nb_lcores;
359         nb_fwd_lcores = 1;
360 }
361
362 static void
363 set_def_peer_eth_addrs(void)
364 {
365         portid_t i;
366
367         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
368                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
369                 peer_eth_addrs[i].addr_bytes[5] = i;
370         }
371 }
372
373 static void
374 set_default_fwd_ports_config(void)
375 {
376         portid_t pt_id;
377
378         for (pt_id = 0; pt_id < nb_ports; pt_id++)
379                 fwd_ports_ids[pt_id] = pt_id;
380
381         nb_cfg_ports = nb_ports;
382         nb_fwd_ports = nb_ports;
383 }
384
385 void
386 set_def_fwd_config(void)
387 {
388         set_default_fwd_lcores_config();
389         set_def_peer_eth_addrs();
390         set_default_fwd_ports_config();
391 }
392
393 /*
394  * Per-socket mbuf pool creation, done once at init time.
395  */
396 static void
397 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
398                  unsigned int socket_id)
399 {
400         char pool_name[RTE_MEMPOOL_NAMESIZE];
401         struct rte_mempool *rte_mp;
402         uint32_t mb_size;
403
404         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
405         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
406
407 #ifdef RTE_LIBRTE_PMD_XENVIRT
408         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
409                 (unsigned) mb_mempool_cache,
410                 sizeof(struct rte_pktmbuf_pool_private),
411                 rte_pktmbuf_pool_init, NULL,
412                 rte_pktmbuf_init, NULL,
413                 socket_id, 0);
414
415
416
417 #else
418         if (mp_anon != 0)
419                 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
420                                     (unsigned) mb_mempool_cache,
421                                     sizeof(struct rte_pktmbuf_pool_private),
422                                     rte_pktmbuf_pool_init, NULL,
423                                     rte_pktmbuf_init, NULL,
424                                     socket_id, 0);
425         else
426                 /* wrapper to rte_mempool_create() */
427                 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
428                         mb_mempool_cache, 0, mbuf_seg_size, socket_id);
429
430 #endif
431
432         if (rte_mp == NULL) {
433                 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
434                                                 "failed\n", socket_id);
435         } else if (verbose_level > 0) {
436                 rte_mempool_dump(stdout, rte_mp);
437         }
438 }
439
440 /*
441  * Check whether the given socket id is valid in NUMA mode.
442  * Return 0 if valid, -1 otherwise.
443  */
444 static int
445 check_socket_id(const unsigned int socket_id)
446 {
447         static int warning_once = 0;
448
449         if (socket_id >= MAX_SOCKET) {
450                 if (!warning_once && numa_support)
451                         printf("Warning: NUMA should be configured manually by"
452                                " using --port-numa-config and"
453                                " --ring-numa-config parameters along with"
454                                " --numa.\n");
455                 warning_once = 1;
456                 return -1;
457         }
458         return 0;
459 }
460
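/*
 * One-time initialisation of the global run-time configuration: forwarding
 * lcores, per-socket mbuf pools, per-port device information and forwarding
 * streams.
 */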
461 static void
462 init_config(void)
463 {
464         portid_t pid;
465         struct rte_port *port;
466         struct rte_mempool *mbp;
467         unsigned int nb_mbuf_per_pool;
468         lcoreid_t  lc_id;
469         uint8_t port_per_socket[MAX_SOCKET];
470
471         memset(port_per_socket,0,MAX_SOCKET);
472         /* Configuration of logical cores. */
473         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
474                                 sizeof(struct fwd_lcore *) * nb_lcores,
475                                 RTE_CACHE_LINE_SIZE);
476         if (fwd_lcores == NULL) {
477                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
478                                                         "failed\n", nb_lcores);
479         }
480         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
481                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
482                                                sizeof(struct fwd_lcore),
483                                                RTE_CACHE_LINE_SIZE);
484                 if (fwd_lcores[lc_id] == NULL) {
485                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
486                                                                 "failed\n");
487                 }
488                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
489         }
490
491         /*
492          * Create mbuf pools.
493          * If NUMA support is disabled, create a single mbuf pool in
494          * socket 0 memory by default.
495          * Otherwise, create an mbuf pool in the memory of sockets 0 and 1.
496          *
497          * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd
498          * and nb_txd can be reconfigured at run time.
499          */
500         if (param_total_num_mbufs)
501                 nb_mbuf_per_pool = param_total_num_mbufs;
502         else {
503                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
504                                 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
505
506                 if (!numa_support)
507                         nb_mbuf_per_pool =
508                                 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
509         }
510
511         if (!numa_support) {
512                 if (socket_num == UMA_NO_CONFIG)
513                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
514                 else
515                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
516                                                  socket_num);
517         }
518
519         FOREACH_PORT(pid, ports) {
520                 port = &ports[pid];
521                 rte_eth_dev_info_get(pid, &port->dev_info);
522
523                 if (numa_support) {
524                         if (port_numa[pid] != NUMA_NO_CONFIG)
525                                 port_per_socket[port_numa[pid]]++;
526                         else {
527                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
528
529                                 /* if socket_id is invalid, set to 0 */
530                                 if (check_socket_id(socket_id) < 0)
531                                         socket_id = 0;
532                                 port_per_socket[socket_id]++;
533                         }
534                 }
535
536                 /* set flag to initialize port/queue */
537                 port->need_reconfig = 1;
538                 port->need_reconfig_queues = 1;
539         }
540
541         if (numa_support) {
542                 uint8_t i;
543                 unsigned int nb_mbuf;
544
545                 if (param_total_num_mbufs)
546                         nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
547
548                 for (i = 0; i < MAX_SOCKET; i++) {
549                         nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
550                         if (nb_mbuf)
551                                 mbuf_pool_create(mbuf_data_size,
552                                                 nb_mbuf,i);
553                 }
554         }
555         init_port_config();
556
557         /*
558          * Record which mbuf pool each logical core uses, if needed.
559          */
560         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
561                 mbp = mbuf_pool_find(
562                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
563
564                 if (mbp == NULL)
565                         mbp = mbuf_pool_find(0);
566                 fwd_lcores[lc_id]->mbp = mbp;
567         }
568
569         /* Configuration of packet forwarding streams. */
570         if (init_fwd_streams() < 0)
571                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
572 }
573
574
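/*
 * Refresh the device information of the given port, record its socket and
 * flag the port and its queues for reconfiguration.
 */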
575 void
576 reconfig(portid_t new_port_id, unsigned socket_id)
577 {
578         struct rte_port *port;
579
580         /* Reconfiguration of Ethernet ports. */
581         port = &ports[new_port_id];
582         rte_eth_dev_info_get(new_port_id, &port->dev_info);
583
584         /* set flag to initialize port/queue */
585         port->need_reconfig = 1;
586         port->need_reconfig_queues = 1;
587         port->socket_id = socket_id;
588
589         init_port_config();
590 }
591
592
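/*
 * Check the configured number of RX/TX queues against the device limits,
 * assign a socket to each port and (re)allocate one forwarding stream per
 * RX queue of each port.
 */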
593 int
594 init_fwd_streams(void)
595 {
596         portid_t pid;
597         struct rte_port *port;
598         streamid_t sm_id, nb_fwd_streams_new;
599
600         /* set the socket id according to whether NUMA is enabled */
601         FOREACH_PORT(pid, ports) {
602                 port = &ports[pid];
603                 if (nb_rxq > port->dev_info.max_rx_queues) {
604                         printf("Fail: nb_rxq(%d) is greater than "
605                                 "max_rx_queues(%d)\n", nb_rxq,
606                                 port->dev_info.max_rx_queues);
607                         return -1;
608                 }
609                 if (nb_txq > port->dev_info.max_tx_queues) {
610                         printf("Fail: nb_txq(%d) is greater than "
611                                 "max_tx_queues(%d)\n", nb_txq,
612                                 port->dev_info.max_tx_queues);
613                         return -1;
614                 }
615                 if (numa_support) {
616                         if (port_numa[pid] != NUMA_NO_CONFIG)
617                                 port->socket_id = port_numa[pid];
618                         else {
619                                 port->socket_id = rte_eth_dev_socket_id(pid);
620
621                                 /* if socket_id is invalid, set to 0 */
622                                 if (check_socket_id(port->socket_id) < 0)
623                                         port->socket_id = 0;
624                         }
625                 }
626                 else {
627                         if (socket_num == UMA_NO_CONFIG)
628                                 port->socket_id = 0;
629                         else
630                                 port->socket_id = socket_num;
631                 }
632         }
633
634         nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
635         if (nb_fwd_streams_new == nb_fwd_streams)
636                 return 0;
637         /* free the old forwarding streams */
638         if (fwd_streams != NULL) {
639                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
640                         if (fwd_streams[sm_id] == NULL)
641                                 continue;
642                         rte_free(fwd_streams[sm_id]);
643                         fwd_streams[sm_id] = NULL;
644                 }
645                 rte_free(fwd_streams);
646                 fwd_streams = NULL;
647         }
648
649         /* init new */
650         nb_fwd_streams = nb_fwd_streams_new;
651         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
652                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
653         if (fwd_streams == NULL)
654                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
655                                                 "failed\n", nb_fwd_streams);
656
657         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
658                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
659                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
660                 if (fwd_streams[sm_id] == NULL)
661                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
662                                                                 " failed\n");
663         }
664
665         return 0;
666 }
667
668 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
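/*
 * Display the distribution of the RX or TX burst sizes recorded for a stream.
 */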
669 static void
670 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
671 {
672         unsigned int total_burst;
673         unsigned int nb_burst;
674         unsigned int burst_stats[3];
675         uint16_t pktnb_stats[3];
676         uint16_t nb_pkt;
677         int burst_percent[3];
678
679         /*
680          * First compute the total number of packet bursts and the
681          * two highest numbers of bursts of the same number of packets.
682          */
683         total_burst = 0;
684         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
685         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
686         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
687                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
688                 if (nb_burst == 0)
689                         continue;
690                 total_burst += nb_burst;
691                 if (nb_burst > burst_stats[0]) {
692                         burst_stats[1] = burst_stats[0];
693                         pktnb_stats[1] = pktnb_stats[0];
694                         burst_stats[0] = nb_burst;
695                         pktnb_stats[0] = nb_pkt;
696                 }
697         }
698         if (total_burst == 0)
699                 return;
700         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
701         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
702                burst_percent[0], (int) pktnb_stats[0]);
703         if (burst_stats[0] == total_burst) {
704                 printf("]\n");
705                 return;
706         }
707         if (burst_stats[0] + burst_stats[1] == total_burst) {
708                 printf(" + %d%% of %d pkts]\n",
709                        100 - burst_percent[0], pktnb_stats[1]);
710                 return;
711         }
712         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
713         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
714         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
715                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
716                 return;
717         }
718         printf(" + %d%% of %d pkts + %d%% of others]\n",
719                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
720 }
721 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
722
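/*
 * Display the forwarding statistics (packets, drops, errors, pause frames and
 * optional per-queue counters) accumulated on a port.
 */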
723 static void
724 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
725 {
726         struct rte_port *port;
727         uint8_t i;
728
729         static const char *fwd_stats_border = "----------------------";
730
731         port = &ports[port_id];
732         printf("\n  %s Forward statistics for port %-2d %s\n",
733                fwd_stats_border, port_id, fwd_stats_border);
734
735         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
736                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
737                        "%-"PRIu64"\n",
738                        stats->ipackets, stats->imissed,
739                        (uint64_t) (stats->ipackets + stats->imissed));
740
741                 if (cur_fwd_eng == &csum_fwd_engine)
742                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
743                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
744                 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
745                         printf("  RX-badcrc:  %-14"PRIu64" RX-badlen:  %-14"PRIu64
746                                "RX-error: %-"PRIu64"\n",
747                                stats->ibadcrc, stats->ibadlen, stats->ierrors);
748                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
749                 }
750
751                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
752                        "%-"PRIu64"\n",
753                        stats->opackets, port->tx_dropped,
754                        (uint64_t) (stats->opackets + port->tx_dropped));
755         }
756         else {
757                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
758                        "%14"PRIu64"\n",
759                        stats->ipackets, stats->imissed,
760                        (uint64_t) (stats->ipackets + stats->imissed));
761
762                 if (cur_fwd_eng == &csum_fwd_engine)
763                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
764                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
765                 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
766                         printf("  RX-badcrc:              %14"PRIu64"    RX-badlen: %14"PRIu64
767                                "    RX-error:%"PRIu64"\n",
768                                stats->ibadcrc, stats->ibadlen, stats->ierrors);
769                         printf("  RX-nombufs:             %14"PRIu64"\n",
770                                stats->rx_nombuf);
771                 }
772
773                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
774                        "%14"PRIu64"\n",
775                        stats->opackets, port->tx_dropped,
776                        (uint64_t) (stats->opackets + port->tx_dropped));
777         }
778
779         /* Display statistics of XON/XOFF pause frames, if any. */
780         if ((stats->tx_pause_xon  | stats->rx_pause_xon |
781              stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
782                 printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
783                        stats->rx_pause_xoff, stats->rx_pause_xon);
784                 printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
785                        stats->tx_pause_xoff, stats->tx_pause_xon);
786         }
787
788 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
789         if (port->rx_stream)
790                 pkt_burst_stats_display("RX",
791                         &port->rx_stream->rx_burst_stats);
792         if (port->tx_stream)
793                 pkt_burst_stats_display("TX",
794                         &port->tx_stream->tx_burst_stats);
795 #endif
796         /* Display flow director statistics, if flow director is enabled. */
797         if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
798                 printf("  Fdirmiss:%14"PRIu64"    Fdirmatch:%14"PRIu64"\n",
799                        stats->fdirmiss,
800                        stats->fdirmatch);
801
802         if (port->rx_queue_stats_mapping_enabled) {
803                 printf("\n");
804                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
805                         printf("  Stats reg %2d RX-packets:%14"PRIu64
806                                "     RX-errors:%14"PRIu64
807                                "    RX-bytes:%14"PRIu64"\n",
808                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
809                 }
810                 printf("\n");
811         }
812         if (port->tx_queue_stats_mapping_enabled) {
813                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
814                         printf("  Stats reg %2d TX-packets:%14"PRIu64
815                                "                                 TX-bytes:%14"PRIu64"\n",
816                                i, stats->q_opackets[i], stats->q_obytes[i]);
817                 }
818         }
819
820         printf("  %s--------------------------------%s\n",
821                fwd_stats_border, fwd_stats_border);
822 }
823
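/*
 * Display the statistics of a single forwarding stream.
 * Streams that did not handle any packet are skipped.
 */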
824 static void
825 fwd_stream_stats_display(streamid_t stream_id)
826 {
827         struct fwd_stream *fs;
828         static const char *fwd_top_stats_border = "-------";
829
830         fs = fwd_streams[stream_id];
831         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
832             (fs->fwd_dropped == 0))
833                 return;
834         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
835                "TX Port=%2d/Queue=%2d %s\n",
836                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
837                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
838         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
839                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
840
841         /* if the checksum forwarding engine is in use */
842         if (cur_fwd_eng == &csum_fwd_engine) {
843                printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
844                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
845         }
846
847 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
848         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
849         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
850 #endif
851 }
852
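/*
 * Drain and free the packets still pending in the RX queues of the forwarding
 * ports, so that stale packets do not distort the statistics of the next run.
 */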
853 static void
854 flush_fwd_rx_queues(void)
855 {
856         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
857         portid_t  rxp;
858         portid_t port_id;
859         queueid_t rxq;
860         uint16_t  nb_rx;
861         uint16_t  i;
862         uint8_t   j;
863
864         for (j = 0; j < 2; j++) {
865                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
866                         for (rxq = 0; rxq < nb_rxq; rxq++) {
867                                 port_id = fwd_ports_ids[rxp];
868                                 do {
869                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
870                                                 pkts_burst, MAX_PKT_BURST);
871                                         for (i = 0; i < nb_rx; i++)
872                                                 rte_pktmbuf_free(pkts_burst[i]);
873                                 } while (nb_rx > 0);
874                         }
875                 }
876                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
877         }
878 }
879
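/*
 * Repeatedly invoke the packet forwarding function on every stream assigned
 * to this lcore until the lcore is requested to stop.
 */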
880 static void
881 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
882 {
883         struct fwd_stream **fsm;
884         streamid_t nb_fs;
885         streamid_t sm_id;
886
887         fsm = &fwd_streams[fc->stream_idx];
888         nb_fs = fc->stream_nb;
889         do {
890                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
891                         (*pkt_fwd)(fsm[sm_id]);
892         } while (! fc->stopped);
893 }
894
895 static int
896 start_pkt_forward_on_core(void *fwd_arg)
897 {
898         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
899                              cur_fwd_config.fwd_eng->packet_fwd);
900         return 0;
901 }
902
903 /*
904  * Run the TXONLY packet forwarding engine to send a single burst of packets.
905  * Used to start communication flows in network loopback test configurations.
906  */
907 static int
908 run_one_txonly_burst_on_core(void *fwd_arg)
909 {
910         struct fwd_lcore *fwd_lc;
911         struct fwd_lcore tmp_lcore;
912
913         fwd_lc = (struct fwd_lcore *) fwd_arg;
914         tmp_lcore = *fwd_lc;
915         tmp_lcore.stopped = 1;
916         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
917         return 0;
918 }
919
920 /*
921  * Launch packet forwarding:
922  *     - Set up the per-port forwarding context.
923  *     - Launch the logical cores with their forwarding configuration.
924  */
925 static void
926 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
927 {
928         port_fwd_begin_t port_fwd_begin;
929         unsigned int i;
930         unsigned int lc_id;
931         int diag;
932
933         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
934         if (port_fwd_begin != NULL) {
935                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
936                         (*port_fwd_begin)(fwd_ports_ids[i]);
937         }
938         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
939                 lc_id = fwd_lcores_cpuids[i];
940                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
941                         fwd_lcores[i]->stopped = 0;
942                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
943                                                      fwd_lcores[i], lc_id);
944                         if (diag != 0)
945                                 printf("launch lcore %u failed - diag=%d\n",
946                                        lc_id, diag);
947                 }
948         }
949 }
950
951 /*
952  * Launch packet forwarding configuration.
953  */
954 void
955 start_packet_forwarding(int with_tx_first)
956 {
957         port_fwd_begin_t port_fwd_begin;
958         port_fwd_end_t  port_fwd_end;
959         struct rte_port *port;
960         unsigned int i;
961         portid_t   pt_id;
962         streamid_t sm_id;
963
964         if (all_ports_started() == 0) {
965                 printf("Not all ports were started\n");
966                 return;
967         }
968         if (test_done == 0) {
969                 printf("Packet forwarding already started\n");
970                 return;
971         }
972         if(dcb_test) {
973                 for (i = 0; i < nb_fwd_ports; i++) {
974                         pt_id = fwd_ports_ids[i];
975                         port = &ports[pt_id];
976                         if (!port->dcb_flag) {
977                                 printf("In DCB mode, all forwarding ports must "
978                                        "be configured in this mode.\n");
979                                 return;
980                         }
981                 }
982                 if (nb_fwd_lcores == 1) {
983                         printf("In DCB mode, the number of forwarding cores "
984                                "should be larger than 1.\n");
985                         return;
986                 }
987         }
988         test_done = 0;
989
990         if(!no_flush_rx)
991                 flush_fwd_rx_queues();
992
993         fwd_config_setup();
994         rxtx_config_display();
995
996         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
997                 pt_id = fwd_ports_ids[i];
998                 port = &ports[pt_id];
999                 rte_eth_stats_get(pt_id, &port->stats);
1000                 port->tx_dropped = 0;
1001
1002                 map_port_queue_stats_mapping_registers(pt_id, port);
1003         }
1004         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1005                 fwd_streams[sm_id]->rx_packets = 0;
1006                 fwd_streams[sm_id]->tx_packets = 0;
1007                 fwd_streams[sm_id]->fwd_dropped = 0;
1008                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1009                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1010
1011 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1012                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1013                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1014                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1015                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1016 #endif
1017 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1018                 fwd_streams[sm_id]->core_cycles = 0;
1019 #endif
1020         }
1021         if (with_tx_first) {
1022                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1023                 if (port_fwd_begin != NULL) {
1024                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1025                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1026                 }
1027                 launch_packet_forwarding(run_one_txonly_burst_on_core);
1028                 rte_eal_mp_wait_lcore();
1029                 port_fwd_end = tx_only_engine.port_fwd_end;
1030                 if (port_fwd_end != NULL) {
1031                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1032                                 (*port_fwd_end)(fwd_ports_ids[i]);
1033                 }
1034         }
1035         launch_packet_forwarding(start_pkt_forward_on_core);
1036 }
1037
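/*
 * Stop packet forwarding: signal the forwarding lcores to stop, wait for
 * them, then collect and display per-stream, per-port and accumulated
 * statistics.
 */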
1038 void
1039 stop_packet_forwarding(void)
1040 {
1041         struct rte_eth_stats stats;
1042         struct rte_port *port;
1043         port_fwd_end_t  port_fwd_end;
1044         int i;
1045         portid_t   pt_id;
1046         streamid_t sm_id;
1047         lcoreid_t  lc_id;
1048         uint64_t total_recv;
1049         uint64_t total_xmit;
1050         uint64_t total_rx_dropped;
1051         uint64_t total_tx_dropped;
1052         uint64_t total_rx_nombuf;
1053         uint64_t tx_dropped;
1054         uint64_t rx_bad_ip_csum;
1055         uint64_t rx_bad_l4_csum;
1056 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1057         uint64_t fwd_cycles;
1058 #endif
1059         static const char *acc_stats_border = "+++++++++++++++";
1060
1061         if (all_ports_started() == 0) {
1062                 printf("Not all ports were started\n");
1063                 return;
1064         }
1065         if (test_done) {
1066                 printf("Packet forwarding not started\n");
1067                 return;
1068         }
1069         printf("Telling cores to stop...");
1070         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1071                 fwd_lcores[lc_id]->stopped = 1;
1072         printf("\nWaiting for lcores to finish...\n");
1073         rte_eal_mp_wait_lcore();
1074         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1075         if (port_fwd_end != NULL) {
1076                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1077                         pt_id = fwd_ports_ids[i];
1078                         (*port_fwd_end)(pt_id);
1079                 }
1080         }
1081 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1082         fwd_cycles = 0;
1083 #endif
1084         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1085                 if (cur_fwd_config.nb_fwd_streams >
1086                     cur_fwd_config.nb_fwd_ports) {
1087                         fwd_stream_stats_display(sm_id);
1088                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1089                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1090                 } else {
1091                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1092                                 fwd_streams[sm_id];
1093                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1094                                 fwd_streams[sm_id];
1095                 }
1096                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1097                 tx_dropped = (uint64_t) (tx_dropped +
1098                                          fwd_streams[sm_id]->fwd_dropped);
1099                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1100
1101                 rx_bad_ip_csum =
1102                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1103                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1104                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1105                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1106                                                         rx_bad_ip_csum;
1107
1108                 rx_bad_l4_csum =
1109                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1110                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1111                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1112                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1113                                                         rx_bad_l4_csum;
1114
1115 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1116                 fwd_cycles = (uint64_t) (fwd_cycles +
1117                                          fwd_streams[sm_id]->core_cycles);
1118 #endif
1119         }
1120         total_recv = 0;
1121         total_xmit = 0;
1122         total_rx_dropped = 0;
1123         total_tx_dropped = 0;
1124         total_rx_nombuf  = 0;
1125         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1126                 pt_id = fwd_ports_ids[i];
1127
1128                 port = &ports[pt_id];
1129                 rte_eth_stats_get(pt_id, &stats);
1130                 stats.ipackets -= port->stats.ipackets;
1131                 port->stats.ipackets = 0;
1132                 stats.opackets -= port->stats.opackets;
1133                 port->stats.opackets = 0;
1134                 stats.ibytes   -= port->stats.ibytes;
1135                 port->stats.ibytes = 0;
1136                 stats.obytes   -= port->stats.obytes;
1137                 port->stats.obytes = 0;
1138                 stats.imissed  -= port->stats.imissed;
1139                 port->stats.imissed = 0;
1140                 stats.oerrors  -= port->stats.oerrors;
1141                 port->stats.oerrors = 0;
1142                 stats.rx_nombuf -= port->stats.rx_nombuf;
1143                 port->stats.rx_nombuf = 0;
1144                 stats.fdirmatch -= port->stats.fdirmatch;
1145                 port->stats.fdirmatch = 0;
1146                 stats.fdirmiss -= port->stats.fdirmiss;
1147                 port->stats.fdirmiss = 0;
1148
1149                 total_recv += stats.ipackets;
1150                 total_xmit += stats.opackets;
1151                 total_rx_dropped += stats.imissed;
1152                 total_tx_dropped += port->tx_dropped;
1153                 total_rx_nombuf  += stats.rx_nombuf;
1154
1155                 fwd_port_stats_display(pt_id, &stats);
1156         }
1157         printf("\n  %s Accumulated forward statistics for all ports"
1158                "%s\n",
1159                acc_stats_border, acc_stats_border);
1160         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1161                "%-"PRIu64"\n"
1162                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1163                "%-"PRIu64"\n",
1164                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1165                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1166         if (total_rx_nombuf > 0)
1167                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1168         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1169                "%s\n",
1170                acc_stats_border, acc_stats_border);
1171 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1172         if (total_recv > 0)
1173                 printf("\n  CPU cycles/packet=%u (total cycles="
1174                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1175                        (unsigned int)(fwd_cycles / total_recv),
1176                        fwd_cycles, total_recv);
1177 #endif
1178         printf("\nDone.\n");
1179         test_done = 1;
1180 }
1181
1182 void
1183 dev_set_link_up(portid_t pid)
1184 {
1185         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1186                 printf("\nSet link up fail.\n");
1187 }
1188
1189 void
1190 dev_set_link_down(portid_t pid)
1191 {
1192         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1193                 printf("\nSet link down fail.\n");
1194 }
1195
1196 static int
1197 all_ports_started(void)
1198 {
1199         portid_t pi;
1200         struct rte_port *port;
1201
1202         FOREACH_PORT(pi, ports) {
1203                 port = &ports[pi];
1204                 /* Check if there is a port which is not started */
1205                 if ((port->port_status != RTE_PORT_STARTED) &&
1206                         (port->slave_flag == 0))
1207                         return 0;
1208         }
1209
1210         /* All ports are started */
1211         return 1;
1212 }
1213
1214 int
1215 all_ports_stopped(void)
1216 {
1217         portid_t pi;
1218         struct rte_port *port;
1219
1220         FOREACH_PORT(pi, ports) {
1221                 port = &ports[pi];
1222                 if ((port->port_status != RTE_PORT_STOPPED) &&
1223                         (port->slave_flag == 0))
1224                         return 0;
1225         }
1226
1227         return 1;
1228 }
1229
1230 int
1231 port_is_started(portid_t port_id)
1232 {
1233         if (port_id_is_invalid(port_id, ENABLED_WARN))
1234                 return 0;
1235
1236         if (ports[port_id].port_status != RTE_PORT_STARTED)
1237                 return 0;
1238
1239         return 1;
1240 }
1241
1242 static int
1243 port_is_closed(portid_t port_id)
1244 {
1245         if (port_id_is_invalid(port_id, ENABLED_WARN))
1246                 return 0;
1247
1248         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1249                 return 0;
1250
1251         return 1;
1252 }
1253
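/*
 * Start the given port, or all ports when pid is RTE_PORT_ALL:
 * (re)configure the device and its RX/TX queues when needed, start the
 * device and display its MAC address.
 */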
1254 int
1255 start_port(portid_t pid)
1256 {
1257         int diag, need_check_link_status = -1;
1258         portid_t pi;
1259         queueid_t qi;
1260         struct rte_port *port;
1261         struct ether_addr mac_addr;
1262
1263         if (test_done == 0) {
1264                 printf("Please stop forwarding first\n");
1265                 return -1;
1266         }
1267
1268         if (port_id_is_invalid(pid, ENABLED_WARN))
1269                 return 0;
1270
1271         if (init_fwd_streams() < 0) {
1272                 printf("Fail from init_fwd_streams()\n");
1273                 return -1;
1274         }
1275
1276         if(dcb_config)
1277                 dcb_test = 1;
1278         FOREACH_PORT(pi, ports) {
1279                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1280                         continue;
1281
1282                 need_check_link_status = 0;
1283                 port = &ports[pi];
1284                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1285                                                  RTE_PORT_HANDLING) == 0) {
1286                         printf("Port %d is now not stopped\n", pi);
1287                         continue;
1288                 }
1289
1290                 if (port->need_reconfig > 0) {
1291                         port->need_reconfig = 0;
1292
1293                         printf("Configuring Port %d (socket %u)\n", pi,
1294                                         port->socket_id);
1295                         /* configure port */
1296                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1297                                                 &(port->dev_conf));
1298                         if (diag != 0) {
1299                                 if (rte_atomic16_cmpset(&(port->port_status),
1300                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1301                                         printf("Port %d can not be set back "
1302                                                         "to stopped\n", pi);
1303                                 printf("Fail to configure port %d\n", pi);
1304                                 /* try to reconfigure port next time */
1305                                 port->need_reconfig = 1;
1306                                 return -1;
1307                         }
1308                 }
1309                 if (port->need_reconfig_queues > 0) {
1310                         port->need_reconfig_queues = 0;
1311                         /* setup tx queues */
1312                         for (qi = 0; qi < nb_txq; qi++) {
1313                                 if ((numa_support) &&
1314                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1315                                         diag = rte_eth_tx_queue_setup(pi, qi,
1316                                                 nb_txd,txring_numa[pi],
1317                                                 &(port->tx_conf));
1318                                 else
1319                                         diag = rte_eth_tx_queue_setup(pi, qi,
1320                                                 nb_txd,port->socket_id,
1321                                                 &(port->tx_conf));
1322
1323                                 if (diag == 0)
1324                                         continue;
1325
1326                                 /* Fail to setup tx queue, return */
1327                                 if (rte_atomic16_cmpset(&(port->port_status),
1328                                                         RTE_PORT_HANDLING,
1329                                                         RTE_PORT_STOPPED) == 0)
1330                                         printf("Port %d can not be set back "
1331                                                         "to stopped\n", pi);
1332                                 printf("Fail to configure port %d tx queues\n", pi);
1333                                 /* try to reconfigure queues next time */
1334                                 port->need_reconfig_queues = 1;
1335                                 return -1;
1336                         }
1337                         /* setup rx queues */
1338                         for (qi = 0; qi < nb_rxq; qi++) {
1339                                 if ((numa_support) &&
1340                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1341                                         struct rte_mempool * mp =
1342                                                 mbuf_pool_find(rxring_numa[pi]);
1343                                         if (mp == NULL) {
1344                                                 printf("Failed to setup RX queue: "
1345                                                         "no mempool allocation "
1346                                                         "on socket %d\n",
1347                                                         rxring_numa[pi]);
1348                                                 return -1;
1349                                         }
1350
1351                                         diag = rte_eth_rx_queue_setup(pi, qi,
1352                                              nb_rxd,rxring_numa[pi],
1353                                              &(port->rx_conf),mp);
1354                                 }
1355                                 else
1356                                         diag = rte_eth_rx_queue_setup(pi, qi,
1357                                              nb_rxd,port->socket_id,
1358                                              &(port->rx_conf),
1359                                              mbuf_pool_find(port->socket_id));
1360
1361                                 if (diag == 0)
1362                                         continue;
1363
1364
1365                                 /* Fail to setup rx queue, return */
1366                                 if (rte_atomic16_cmpset(&(port->port_status),
1367                                                         RTE_PORT_HANDLING,
1368                                                         RTE_PORT_STOPPED) == 0)
1369                                         printf("Port %d can not be set back "
1370                                                         "to stopped\n", pi);
1371                                 printf("Fail to configure port %d rx queues\n", pi);
1372                                 /* try to reconfigure queues next time */
1373                                 port->need_reconfig_queues = 1;
1374                                 return -1;
1375                         }
1376                 }
1377                 /* start port */
1378                 if (rte_eth_dev_start(pi) < 0) {
1379                         printf("Fail to start port %d\n", pi);
1380
1381                         /* Failed to start the port; set it back to stopped and continue */
1382                         if (rte_atomic16_cmpset(&(port->port_status),
1383                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1384                                 printf("Port %d can not be set back to "
1385                                                         "stopped\n", pi);
1386                         continue;
1387                 }
1388
1389                 if (rte_atomic16_cmpset(&(port->port_status),
1390                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1391                         printf("Port %d can not be set into started\n", pi);
1392
1393                 rte_eth_macaddr_get(pi, &mac_addr);
1394                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1395                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1396                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1397                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1398
1399                 /* at least one port started, need checking link status */
1400                 need_check_link_status = 1;
1401         }
1402
1403         if (need_check_link_status == 1 && !no_link_check)
1404                 check_all_ports_link_status(RTE_PORT_ALL);
1405         else if (need_check_link_status == 0)
1406                 printf("Please stop the ports first\n");
1407
1408         printf("Done\n");
1409         return 0;
1410 }
1411
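/*
 * Stop one port, or every port when pid == RTE_PORT_ALL.
 * Packet forwarding must already be stopped.  Each port is moved from
 * RTE_PORT_STARTED to RTE_PORT_STOPPED through the transient
 * RTE_PORT_HANDLING state, using rte_atomic16_cmpset() so that a
 * concurrent status change is detected rather than overwritten.
 */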
1412 void
1413 stop_port(portid_t pid)
1414 {
1415         portid_t pi;
1416         struct rte_port *port;
1417         int need_check_link_status = 0;
1418
1419         if (test_done == 0) {
1420                 printf("Please stop forwarding first\n");
1421                 return;
1422         }
1423         if (dcb_test) {
1424                 dcb_test = 0;
1425                 dcb_config = 0;
1426         }
1427
1428         if (port_id_is_invalid(pid, ENABLED_WARN))
1429                 return;
1430
1431         printf("Stopping ports...\n");
1432
1433         FOREACH_PORT(pi, ports) {
1434                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1435                         continue;
1436
1437                 port = &ports[pi];
1438                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1439                                                 RTE_PORT_HANDLING) == 0)
1440                         continue;
1441
1442                 rte_eth_dev_stop(pi);
1443
1444                 if (rte_atomic16_cmpset(&(port->port_status),
1445                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1446                         printf("Port %d cannot be set into stopped state\n", pi);
1447                 need_check_link_status = 1;
1448         }
1449         if (need_check_link_status && !no_link_check)
1450                 check_all_ports_link_status(RTE_PORT_ALL);
1451
1452         printf("Done\n");
1453 }
1454
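/*
 * Close one port, or every port when pid == RTE_PORT_ALL.
 * A port must be stopped before it can be closed: its status is moved
 * from RTE_PORT_STOPPED to RTE_PORT_HANDLING, rte_eth_dev_close() is
 * called, and the port is finally marked RTE_PORT_CLOSED.
 */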
1455 void
1456 close_port(portid_t pid)
1457 {
1458         portid_t pi;
1459         struct rte_port *port;
1460
1461         if (test_done == 0) {
1462                 printf("Please stop forwarding first\n");
1463                 return;
1464         }
1465
1466         if (port_id_is_invalid(pid, ENABLED_WARN))
1467                 return;
1468
1469         printf("Closing ports...\n");
1470
1471         FOREACH_PORT(pi, ports) {
1472                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1473                         continue;
1474
1475                 port = &ports[pi];
1476                 if (rte_atomic16_cmpset(&(port->port_status),
1477                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1478                         printf("Port %d is already closed\n", pi);
1479                         continue;
1480                 }
1481
1482                 if (rte_atomic16_cmpset(&(port->port_status),
1483                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1484                         printf("Port %d is not stopped, stop it before closing\n", pi);
1485                         continue;
1486                 }
1487
1488                 rte_eth_dev_close(pi);
1489
1490                 if (rte_atomic16_cmpset(&(port->port_status),
1491                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1492                         printf("Port %d cannot be set into closed state\n", pi);
1493         }
1494
1495         printf("Done\n");
1496 }
1497
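/*
 * Hot-plug a new port.  The identifier is whatever string
 * rte_eth_dev_attach() accepts, typically a PCI address for a physical
 * device or a virtual device specification.  A sketch of the usual
 * interactive usage (the address below is only an example):
 *
 *   testpmd> port attach 0000:02:00.0
 *
 * On success the port is reconfigured on its NUMA socket, put into
 * promiscuous mode and appended to the forwarding port list.
 */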
1498 void
1499 attach_port(char *identifier)
1500 {
1501         portid_t i, j, pi = 0;
1502
1503         printf("Attaching a new port...\n");
1504
1505         if (identifier == NULL) {
1506                 printf("Invalid parameters specified\n");
1507                 return;
1508         }
1509
1510         if (test_done == 0) {
1511                 printf("Please stop forwarding first\n");
1512                 return;
1513         }
1514
1515         if (rte_eth_dev_attach(identifier, &pi))
1516                 return;
1517
1518         ports[pi].enabled = 1;
1519         reconfig(pi, rte_eth_dev_socket_id(pi));
1520         rte_eth_promiscuous_enable(pi);
1521
1522         nb_ports = rte_eth_dev_count();
1523
1524         /* set_default_fwd_ports_config(); */
1525         bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
1526         i = 0;
1527         FOREACH_PORT(j, ports) {
1528                 fwd_ports_ids[i] = j;
1529                 i++;
1530         }
1531         nb_cfg_ports = nb_ports;
1532         nb_fwd_ports++;
1533
1534         ports[pi].port_status = RTE_PORT_STOPPED;
1535
1536         printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1537         printf("Done\n");
1538 }
1539
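/*
 * Hot-unplug a port.  The port has to be closed first;
 * rte_eth_dev_detach() fills in the underlying device name, which is
 * reported once the forwarding port list has been rebuilt.
 */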
1540 void
1541 detach_port(uint8_t port_id)
1542 {
1543         portid_t i, pi = 0;
1544         char name[RTE_ETH_NAME_MAX_LEN];
1545
1546         printf("Detaching a port...\n");
1547
1548         if (!port_is_closed(port_id)) {
1549                 printf("Please close port first\n");
1550                 return;
1551         }
1552
1553         if (rte_eth_dev_detach(port_id, name))
1554                 return;
1555
1556         ports[port_id].enabled = 0;
1557         nb_ports = rte_eth_dev_count();
1558
1559         /* set_default_fwd_ports_config(); */
1560         bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
1561         i = 0;
1562         FOREACH_PORT(pi, ports) {
1563                 fwd_ports_ids[i] = pi;
1564                 i++;
1565         }
1566         nb_cfg_ports = nb_ports;
1567         nb_fwd_ports--;
1568
1569         printf("Port '%s' is detached. Total number of ports is now %d\n",
1570                         name, nb_ports);
1571         printf("Done\n");
1572         return;
1573 }
1574
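/*
 * Called when testpmd exits: stop packet forwarding if it is still
 * running, then close every remaining port.
 */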
1575 void
1576 pmd_test_exit(void)
1577 {
1578         portid_t pt_id;
1579
1580         if (test_done == 0)
1581                 stop_packet_forwarding();
1582
1583         FOREACH_PORT(pt_id, ports) {
1584                 printf("Stopping port %d...", pt_id);
1585                 fflush(stdout);
1586                 rte_eth_dev_close(pt_id);
1587                 printf("done\n");
1588         }
1589         printf("bye...\n");
1590 }
1591
1592 typedef void (*cmd_func_t)(void);
1593 struct pmd_test_command {
1594         const char *cmd_name;
1595         cmd_func_t cmd_func;
1596 };
1597
1598 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1599
1600 /* Check the link status of all ports in up to 9 s, and print the final status of each */
1601 static void
1602 check_all_ports_link_status(uint32_t port_mask)
1603 {
1604 #define CHECK_INTERVAL 100 /* 100ms */
1605 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1606         uint8_t portid, count, all_ports_up, print_flag = 0;
1607         struct rte_eth_link link;
1608
1609         printf("Checking link statuses...\n");
1610         fflush(stdout);
1611         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1612                 all_ports_up = 1;
1613                 FOREACH_PORT(portid, ports) {
1614                         if ((port_mask & (1 << portid)) == 0)
1615                                 continue;
1616                         memset(&link, 0, sizeof(link));
1617                         rte_eth_link_get_nowait(portid, &link);
1618                         /* print link status if flag set */
1619                         if (print_flag == 1) {
1620                                 if (link.link_status)
1621                                         printf("Port %d Link Up - speed %u "
1622                                                 "Mbps - %s\n", (uint8_t)portid,
1623                                                 (unsigned)link.link_speed,
1624                                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1625                                                 ("full-duplex") : ("half-duplex"));
1626                                 else
1627                                         printf("Port %d Link Down\n",
1628                                                 (uint8_t)portid);
1629                                 continue;
1630                         }
1631                         /* clear all_ports_up flag if any link down */
1632                         if (link.link_status == 0) {
1633                                 all_ports_up = 0;
1634                                 break;
1635                         }
1636                 }
1637                 /* after finally printing all link status, get out */
1638                 if (print_flag == 1)
1639                         break;
1640
1641                 if (all_ports_up == 0) {
1642                         fflush(stdout);
1643                         rte_delay_ms(CHECK_INTERVAL);
1644                 }
1645
1646                 /* set the print_flag if all ports up or timeout */
1647                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1648                         print_flag = 1;
1649                 }
1650         }
1651 }
1652
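/*
 * Apply the per-queue statistics counter mappings given on the command
 * line.  For every mapping entry that matches this port, the TX queue
 * is bound to its statistics counter with
 * rte_eth_dev_set_tx_queue_stats_mapping(); any error code from the
 * driver is propagated to the caller.  The RX variant below does the
 * same for RX queues.
 */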
1653 static int
1654 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1655 {
1656         uint16_t i;
1657         int diag;
1658         uint8_t mapping_found = 0;
1659
1660         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1661                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1662                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1663                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1664                                         tx_queue_stats_mappings[i].queue_id,
1665                                         tx_queue_stats_mappings[i].stats_counter_id);
1666                         if (diag != 0)
1667                                 return diag;
1668                         mapping_found = 1;
1669                 }
1670         }
1671         if (mapping_found)
1672                 port->tx_queue_stats_mapping_enabled = 1;
1673         return 0;
1674 }
1675
1676 static int
1677 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1678 {
1679         uint16_t i;
1680         int diag;
1681         uint8_t mapping_found = 0;
1682
1683         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1684                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1685                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1686                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1687                                         rx_queue_stats_mappings[i].queue_id,
1688                                         rx_queue_stats_mappings[i].stats_counter_id);
1689                         if (diag != 0)
1690                                 return diag;
1691                         mapping_found = 1;
1692                 }
1693         }
1694         if (mapping_found)
1695                 port->rx_queue_stats_mapping_enabled = 1;
1696         return 0;
1697 }
1698
1699 static void
1700 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1701 {
1702         int diag = 0;
1703
1704         diag = set_tx_queue_stats_mapping_registers(pi, port);
1705         if (diag != 0) {
1706                 if (diag == -ENOTSUP) {
1707                         port->tx_queue_stats_mapping_enabled = 0;
1708                         printf("TX queue stats mapping not supported on port id=%d\n", pi);
1709                 }
1710                 else
1711                         rte_exit(EXIT_FAILURE,
1712                                         "set_tx_queue_stats_mapping_registers "
1713                                         "failed for port id=%d diag=%d\n",
1714                                         pi, diag);
1715         }
1716
1717         diag = set_rx_queue_stats_mapping_registers(pi, port);
1718         if (diag != 0) {
1719                 if (diag == -ENOTSUP) {
1720                         port->rx_queue_stats_mapping_enabled = 0;
1721                         printf("RX queue stats mapping not supported on port id=%d\n", pi);
1722                 }
1723                 else
1724                         rte_exit(EXIT_FAILURE,
1725                                         "set_rx_queue_stats_mapping_registers "
1726                                         "failed for port id=%d diag=%d\n",
1727                                         pi, diag);
1728         }
1729 }
1730
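/*
 * Start from the default RX/TX queue configuration reported by the
 * driver in dev_info and override only the thresholds and flags that
 * were explicitly set on the command line (i.e. values different from
 * RTE_PMD_PARAM_UNSET).
 */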
1731 static void
1732 rxtx_port_config(struct rte_port *port)
1733 {
1734         port->rx_conf = port->dev_info.default_rxconf;
1735         port->tx_conf = port->dev_info.default_txconf;
1736
1737         /* Check if any RX/TX parameters have been passed */
1738         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1739                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1740
1741         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1742                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1743
1744         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1745                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1746
1747         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1748                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1749
1750         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1751                 port->rx_conf.rx_drop_en = rx_drop_en;
1752
1753         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1754                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1755
1756         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1757                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1758
1759         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1760                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1761
1762         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1763                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1764
1765         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1766                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1767
1768         if (txq_flags != RTE_PMD_PARAM_UNSET)
1769                 port->tx_conf.txq_flags = txq_flags;
1770 }
1771
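/*
 * Build the default device configuration for every enabled port:
 * RSS is requested only when more than one RX queue is configured, and
 * VMDq RSS is selected instead when the device exposes virtual
 * functions (max_vfs != 0).
 */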
1772 void
1773 init_port_config(void)
1774 {
1775         portid_t pid;
1776         struct rte_port *port;
1777
1778         FOREACH_PORT(pid, ports) {
1779                 port = &ports[pid];
1780                 port->dev_conf.rxmode = rx_mode;
1781                 port->dev_conf.fdir_conf = fdir_conf;
1782                 if (nb_rxq > 1) {
1783                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1784                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1785                 } else {
1786                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1787                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1788                 }
1789
1790                 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1791                         if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1792                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1793                         else
1794                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1795                 }
1796
1797                 if (port->dev_info.max_vfs != 0) {
1798                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1799                                 port->dev_conf.rxmode.mq_mode =
1800                                         ETH_MQ_RX_VMDQ_RSS;
1801                         else
1802                                 port->dev_conf.rxmode.mq_mode =
1803                                         ETH_MQ_RX_NONE;
1804
1805                         port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1806                 }
1807
1808                 rxtx_port_config(port);
1809
1810                 rte_eth_macaddr_get(pid, &port->eth_addr);
1811
1812                 map_port_queue_stats_mapping_registers(pid, port);
1813 #ifdef RTE_NIC_BYPASS
1814                 rte_eth_dev_bypass_init(pid);
1815 #endif
1816         }
1817 }
1818
1819 void set_port_slave_flag(portid_t slave_pid)
1820 {
1821         struct rte_port *port;
1822
1823         port = &ports[slave_pid];
1824         port->slave_flag = 1;
1825 }
1826
1827 void clear_port_slave_flag(portid_t slave_pid)
1828 {
1829         struct rte_port *port;
1830
1831         port = &ports[slave_pid];
1832         port->slave_flag = 0;
1833 }
1834
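/*
 * VLAN ids used both as pool-mapping keys in VMDQ+DCB mode and as
 * entries of the hardware VLAN filter table in DCB mode (see
 * get_eth_dcb_conf() and init_port_dcb_config() below).
 */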
1835 const uint16_t vlan_tags[] = {
1836                 0,  1,  2,  3,  4,  5,  6,  7,
1837                 8,  9, 10, 11,  12, 13, 14, 15,
1838                 16, 17, 18, 19, 20, 21, 22, 23,
1839                 24, 25, 26, 27, 28, 29, 30, 31
1840 };
1841
1842 static int
1843 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1844 {
1845         uint8_t i;
1846
1847         /*
1848          * Builds up the correct configuration for DCB+VT based on the VLAN tags array
1849          * given above, and the number of traffic classes available for use.
1850          */
1851         if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1852                 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1853                 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1854
1855                 /* VMDQ+DCB RX and TX configurations */
1856                 vmdq_rx_conf.enable_default_pool = 0;
1857                 vmdq_rx_conf.default_pool = 0;
1858                 vmdq_rx_conf.nb_queue_pools =
1859                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1860                 vmdq_tx_conf.nb_queue_pools =
1861                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1862
1863                 vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1864                 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1865                         vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
1866                         vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1867                 }
1868                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1869                         vmdq_rx_conf.dcb_queue[i] = i;
1870                         vmdq_tx_conf.dcb_queue[i] = i;
1871                 }
1872
1873                 /* set DCB mode of RX and TX of multiple queues */
1874                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1875                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1876                 if (dcb_conf->pfc_en)
1877                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1878                 else
1879                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1880
1881                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1882                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1883                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1884                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1885         }
1886         else {
1887                 struct rte_eth_dcb_rx_conf rx_conf;
1888                 struct rte_eth_dcb_tx_conf tx_conf;
1889
1890                 /* queue mapping configuration of DCB RX and TX */
1891                 if (dcb_conf->num_tcs == ETH_4_TCS)
1892                         dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1893                 else
1894                         dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1895
1896                 rx_conf.nb_tcs = dcb_conf->num_tcs;
1897                 tx_conf.nb_tcs = dcb_conf->num_tcs;
1898
1899                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1900                         rx_conf.dcb_queue[i] = i;
1901                         tx_conf.dcb_queue[i] = i;
1902                 }
1903                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1904                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1905                 if (dcb_conf->pfc_en)
1906                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1907                 else
1908                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1909
1910                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1911                                 sizeof(struct rte_eth_dcb_rx_conf)));
1912                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1913                                 sizeof(struct rte_eth_dcb_tx_conf)));
1914         }
1915
1916         return 0;
1917 }
1918
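/*
 * Switch one port to DCB mode.  The rte_eth_conf produced by
 * get_eth_dcb_conf() is copied into the port, hardware VLAN filtering
 * is enabled and every id from vlan_tags[] is added to the filter
 * table.  Note that nb_rxq and nb_txq are forced to 128 queues here.
 */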
1919 int
1920 init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
1921 {
1922         struct rte_eth_conf port_conf;
1923         struct rte_port *rte_port;
1924         int retval;
1925         uint16_t nb_vlan;
1926         uint16_t i;
1927
1928         /* rxq and txq configuration in dcb mode */
1929         nb_rxq = 128;
1930         nb_txq = 128;
1931         rx_free_thresh = 64;
1932
1933         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1934         /* Enter DCB configuration status */
1935         dcb_config = 1;
1936
1937         nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1938         /* set configuration of DCB in VT mode and DCB in non-VT mode */
1939         retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1940         if (retval < 0)
1941                 return retval;
1942
1943         rte_port = &ports[pid];
1944         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1945
1946         rxtx_port_config(rte_port);
1947         /* VLAN filter */
1948         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1949         for (i = 0; i < nb_vlan; i++) {
1950                 rx_vft_set(pid, vlan_tags[i], 1);
1951         }
1952
1953         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1954         map_port_queue_stats_mapping_registers(pid, rte_port);
1955
1956         rte_port->dcb_flag = 1;
1957
1958         return 0;
1959 }
1960
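/*
 * Allocate the global ports[] array (one struct rte_port per possible
 * port id, RTE_MAX_ETHPORTS entries) and mark the probed ports as
 * enabled.
 */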
1961 static void
1962 init_port(void)
1963 {
1964         portid_t pid;
1965
1966         /* Configuration of Ethernet ports. */
1967         ports = rte_zmalloc("testpmd: ports",
1968                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
1969                             RTE_CACHE_LINE_SIZE);
1970         if (ports == NULL) {
1971                 rte_exit(EXIT_FAILURE,
1972                                 "rte_zmalloc(%d struct rte_port) failed\n",
1973                                 RTE_MAX_ETHPORTS);
1974         }
1975
1976         /* enable allocated ports */
1977         for (pid = 0; pid < nb_ports; pid++)
1978                 ports[pid].enabled = 1;
1979 }
1980
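/*
 * Startup sequence: initialize the EAL, allocate the port array, apply
 * the default forwarding configuration, parse the testpmd-specific
 * arguments left over after rte_eal_init(), configure and start all
 * ports, then either enter the interactive prompt or start forwarding
 * right away.  A typical invocation, with EAL options before the "--"
 * separator and testpmd options after it, might look like (options are
 * only an example and depend on the platform):
 *
 *   testpmd -c 0xf -n 4 -- -i --rxq=2 --txq=2
 */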
1981 int
1982 main(int argc, char** argv)
1983 {
1984         int  diag;
1985         uint8_t port_id;
1986
1987         diag = rte_eal_init(argc, argv);
1988         if (diag < 0)
1989                 rte_panic("Cannot init EAL\n");
1990
1991         nb_ports = (portid_t) rte_eth_dev_count();
1992         if (nb_ports == 0)
1993                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
1994
1995         /* allocate port structures, and init them */
1996         init_port();
1997
1998         set_def_fwd_config();
1999         if (nb_lcores == 0)
2000                 rte_panic("Empty set of forwarding logical cores - check the "
2001                           "core mask supplied in the command parameters\n");
2002
2003         argc -= diag;
2004         argv += diag;
2005         if (argc > 1)
2006                 launch_args_parse(argc, argv);
2007
2008         if (nb_rxq > nb_txq)
2009                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2010                        "but nb_txq=%d will prevent it from being fully tested.\n",
2011                        nb_rxq, nb_txq);
2012
2013         init_config();
2014         if (start_port(RTE_PORT_ALL) != 0)
2015                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2016
2017         /* set all ports to promiscuous mode by default */
2018         FOREACH_PORT(port_id, ports)
2019                 rte_eth_promiscuous_enable(port_id);
2020
2021 #ifdef RTE_LIBRTE_CMDLINE
2022         if (interactive == 1) {
2023                 if (auto_start) {
2024                         printf("Start automatic packet forwarding\n");
2025                         start_packet_forwarding(0);
2026                 }
2027                 prompt();
2028         } else
2029 #endif
2030         {
2031                 char c;
2032                 int rc;
2033
2034                 printf("No command line core given, starting packet forwarding\n");
2035                 start_packet_forwarding(0);
2036                 printf("Press enter to exit\n");
2037                 rc = read(0, &c, 1);
2038                 if (rc < 0)
2039                         return 1;
2040         }
2041
2042         return 0;
2043 }