ethdev: remove missed packets from error counter
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_eal.h>
61 #include <rte_per_lcore.h>
62 #include <rte_lcore.h>
63 #include <rte_atomic.h>
64 #include <rte_branch_prediction.h>
65 #include <rte_ring.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
68 #include <rte_mbuf.h>
69 #include <rte_interrupts.h>
70 #include <rte_pci.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
73 #include <rte_dev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81
82 uint16_t verbose_level = 0; /**< Silent by default. */
83
84 /* Use the master core for the command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
87
88 /*
89  * NUMA support configuration.
90  * When set, the NUMA support attempts to dispatch the allocation of the
91  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92  * probed ports among the CPU sockets 0 and 1.
93  * Otherwise, all memory is allocated from CPU socket 0.
94  */
95 uint8_t numa_support = 0; /**< No numa support by default */
96
97 /*
98  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99  * not configured.
100  */
101 uint8_t socket_num = UMA_NO_CONFIG;
102
103 /*
104  * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
105  */
106 uint8_t mp_anon = 0;
107
108 /*
109  * Record the Ethernet address of peer target ports to which packets are
110  * forwarded.
111  * Must be instantiated with the Ethernet addresses of peer traffic generator
112  * ports.
113  */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
116
117 /*
118  * Probed Target Environment.
119  */
120 struct rte_port *ports;        /**< For all probed ethernet ports. */
121 portid_t nb_ports;             /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124
125 /*
126  * Test Forwarding Configuration.
127  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129  */
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
134
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
140
141 /*
142  * Forwarding engines.
143  */
144 struct fwd_engine * fwd_engines[] = {
145         &io_fwd_engine,
146         &mac_fwd_engine,
147         &mac_retry_fwd_engine,
148         &mac_swap_engine,
149         &flow_gen_engine,
150         &rx_only_engine,
151         &tx_only_engine,
152         &csum_fwd_engine,
153         &icmp_echo_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155         &ieee1588_fwd_engine,
156 #endif
157         NULL,
158 };
159
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
162
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
165                                       * specified on command-line. */
166
167 /*
168  * Configuration of packet segments used by the "txonly" processing engine.
169  */
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172         TXONLY_DEF_PACKET_LEN,
173 };
174 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
175
176 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
177 /**< Split policy for packets to TX. */
178
179 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
180 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
181
182 /* Whether the current configuration is in DCB mode; 0 means it is not. */
183 uint8_t dcb_config = 0;
184
185 /* Whether DCB is in testing status */
186 uint8_t dcb_test = 0;
187
188 /*
189  * Configurable number of RX/TX queues.
190  */
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193
194 /*
195  * Configurable number of RX/TX ring descriptors.
196  */
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
201
202 #define RTE_PMD_PARAM_UNSET -1
203 /*
204  * Configurable values of RX and TX ring threshold registers.
205  */
206
207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
210
211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
214
215 /*
216  * Configurable value of RX free threshold.
217  */
218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
219
220 /*
221  * Configurable value of RX drop enable.
222  */
223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
224
225 /*
226  * Configurable value of TX free threshold.
227  */
228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
229
230 /*
231  * Configurable value of TX RS bit threshold.
232  */
233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
234
235 /*
236  * Configurable value of TX queue flags.
237  */
238 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
239
240 /*
241  * Receive Side Scaling (RSS) configuration.
242  */
243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
244
245 /*
246  * Port topology configuration
247  */
248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
249
250 /*
251  * Avoid flushing all the RX streams before starting forwarding.
252  */
253 uint8_t no_flush_rx = 0; /* flush by default */
254
255 /*
256  * Avoid checking the link status when starting/stopping a port.
257  */
258 uint8_t no_link_check = 0; /* check by default */
259
260 /*
261  * NIC bypass mode configuration options.
262  */
263 #ifdef RTE_NIC_BYPASS
264
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
267
268 #endif
269
270 /*
271  * Ethernet device configuration.
272  */
273 struct rte_eth_rxmode rx_mode = {
274         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
275         .split_hdr_size = 0,
276         .header_split   = 0, /**< Header Split disabled. */
277         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
280         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
282         .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
283 };
284
285 struct rte_fdir_conf fdir_conf = {
286         .mode = RTE_FDIR_MODE_NONE,
287         .pballoc = RTE_FDIR_PBALLOC_64K,
288         .status = RTE_FDIR_REPORT_STATUS,
289         .mask = {
290                 .vlan_tci_mask = 0x0,
291                 .ipv4_mask     = {
292                         .src_ip = 0xFFFFFFFF,
293                         .dst_ip = 0xFFFFFFFF,
294                 },
295                 .ipv6_mask     = {
296                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
297                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
298                 },
299                 .src_port_mask = 0xFFFF,
300                 .dst_port_mask = 0xFFFF,
301                 .mac_addr_byte_mask = 0xFF,
302                 .tunnel_type_mask = 1,
303                 .tunnel_id_mask = 0xFFFFFFFF,
304         },
305         .drop_queue = 127,
306 };
307
308 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
309
310 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
311 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
312
313 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
314 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
315
316 uint16_t nb_tx_queue_stats_mappings = 0;
317 uint16_t nb_rx_queue_stats_mappings = 0;
318
319 unsigned max_socket = 0;
320
321 /* Forward function declarations */
322 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
323 static void check_all_ports_link_status(uint32_t port_mask);
324
325 /*
326  * Check if all the ports are started.
327  * If yes, return positive value. If not, return zero.
328  */
329 static int all_ports_started(void);
330
331 /*
332  * Find next enabled port
333  */
334 portid_t
335 find_next_port(portid_t p, struct rte_port *ports, int size)
336 {
337         if (ports == NULL)
338                 rte_exit(-EINVAL, "failed to find a next port id\n");
339
340         while ((p < size) && (ports[p].enabled == 0))
341                 p++;
342         return p;
343 }
344
345 /*
346  * Setup default configuration.
347  */
348 static void
349 set_default_fwd_lcores_config(void)
350 {
351         unsigned int i;
352         unsigned int nb_lc;
353         unsigned int sock_num;
354
355         nb_lc = 0;
356         for (i = 0; i < RTE_MAX_LCORE; i++) {
357                 sock_num = rte_lcore_to_socket_id(i) + 1;
358                 if (sock_num > max_socket) {
359                         if (sock_num > RTE_MAX_NUMA_NODES)
360                                 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
361                         max_socket = sock_num;
362                 }
363                 if (!rte_lcore_is_enabled(i))
364                         continue;
365                 if (i == rte_get_master_lcore())
366                         continue;
367                 fwd_lcores_cpuids[nb_lc++] = i;
368         }
369         nb_lcores = (lcoreid_t) nb_lc;
370         nb_cfg_lcores = nb_lcores;
371         nb_fwd_lcores = 1;
372 }
373
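/*
 * Set a default locally administered Ethernet address for each possible
 * peer port.
 */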
374 static void
375 set_def_peer_eth_addrs(void)
376 {
377         portid_t i;
378
379         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
380                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
381                 peer_eth_addrs[i].addr_bytes[5] = i;
382         }
383 }
384
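/*
 * By default, use all probed ports as forwarding ports.
 */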
385 static void
386 set_default_fwd_ports_config(void)
387 {
388         portid_t pt_id;
389
390         for (pt_id = 0; pt_id < nb_ports; pt_id++)
391                 fwd_ports_ids[pt_id] = pt_id;
392
393         nb_cfg_ports = nb_ports;
394         nb_fwd_ports = nb_ports;
395 }
396
397 void
398 set_def_fwd_config(void)
399 {
400         set_default_fwd_lcores_config();
401         set_def_peer_eth_addrs();
402         set_default_fwd_ports_config();
403 }
404
405 /*
406  * Create a mempool of mbufs in the memory of the given CPU socket.
407  */
408 static void
409 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
410                  unsigned int socket_id)
411 {
412         char pool_name[RTE_MEMPOOL_NAMESIZE];
413         struct rte_mempool *rte_mp;
414         uint32_t mb_size;
415
416         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
417         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
418
419 #ifdef RTE_LIBRTE_PMD_XENVIRT
420         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
421                 (unsigned) mb_mempool_cache,
422                 sizeof(struct rte_pktmbuf_pool_private),
423                 rte_pktmbuf_pool_init, NULL,
424                 rte_pktmbuf_init, NULL,
425                 socket_id, 0);
426
427
428
429 #else
430         if (mp_anon != 0)
431                 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
432                                     (unsigned) mb_mempool_cache,
433                                     sizeof(struct rte_pktmbuf_pool_private),
434                                     rte_pktmbuf_pool_init, NULL,
435                                     rte_pktmbuf_init, NULL,
436                                     socket_id, 0);
437         else
438                 /* wrapper to rte_mempool_create() */
439                 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
440                         mb_mempool_cache, 0, mbuf_seg_size, socket_id);
441
442 #endif
443
444         if (rte_mp == NULL) {
445                 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
446                                                 "failed\n", socket_id);
447         } else if (verbose_level > 0) {
448                 rte_mempool_dump(stdout, rte_mp);
449         }
450 }
451
452 /*
453  * Check whether the given socket ID is valid in NUMA mode;
454  * return 0 if valid, -1 otherwise.
455  */
456 static int
457 check_socket_id(const unsigned int socket_id)
458 {
459         static int warning_once = 0;
460
461         if (socket_id >= max_socket) {
462                 if (!warning_once && numa_support)
463                         printf("Warning: NUMA should be configured manually by"
464                                " using --port-numa-config and"
465                                " --ring-numa-config parameters along with"
466                                " --numa.\n");
467                 warning_once = 1;
468                 return -1;
469         }
470         return 0;
471 }
472
473 static void
474 init_config(void)
475 {
476         portid_t pid;
477         struct rte_port *port;
478         struct rte_mempool *mbp;
479         unsigned int nb_mbuf_per_pool;
480         lcoreid_t  lc_id;
481         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
482
483         memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
484         /* Configuration of logical cores. */
485         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
486                                 sizeof(struct fwd_lcore *) * nb_lcores,
487                                 RTE_CACHE_LINE_SIZE);
488         if (fwd_lcores == NULL) {
489                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
490                                                         "failed\n", nb_lcores);
491         }
492         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
493                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
494                                                sizeof(struct fwd_lcore),
495                                                RTE_CACHE_LINE_SIZE);
496                 if (fwd_lcores[lc_id] == NULL) {
497                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
498                                                                 "failed\n");
499                 }
500                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
501         }
502
503         /*
504          * Create pools of mbuf.
505          * If NUMA support is disabled, create a single pool of mbuf in
506          * socket 0 memory by default.
507          * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
508          *
509          * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
510          * nb_txd can be configured at run time.
511          */
512         if (param_total_num_mbufs)
513                 nb_mbuf_per_pool = param_total_num_mbufs;
514         else {
515                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
516                                 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
517
518                 if (!numa_support)
519                         nb_mbuf_per_pool =
520                                 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
521         }
522
523         if (!numa_support) {
524                 if (socket_num == UMA_NO_CONFIG)
525                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
526                 else
527                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
528                                                  socket_num);
529         }
530
531         FOREACH_PORT(pid, ports) {
532                 port = &ports[pid];
533                 rte_eth_dev_info_get(pid, &port->dev_info);
534
535                 if (numa_support) {
536                         if (port_numa[pid] != NUMA_NO_CONFIG)
537                                 port_per_socket[port_numa[pid]]++;
538                         else {
539                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
540
541                                 /* if socket_id is invalid, set to 0 */
542                                 if (check_socket_id(socket_id) < 0)
543                                         socket_id = 0;
544                                 port_per_socket[socket_id]++;
545                         }
546                 }
547
548                 /* set flag to initialize port/queue */
549                 port->need_reconfig = 1;
550                 port->need_reconfig_queues = 1;
551         }
552
553         if (numa_support) {
554                 uint8_t i;
555                 unsigned int nb_mbuf;
556
557                 if (param_total_num_mbufs)
558                         nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
559
560                 for (i = 0; i < max_socket; i++) {
561                         nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
562                         if (nb_mbuf)
563                                 mbuf_pool_create(mbuf_data_size,
564                                                 nb_mbuf,i);
565                 }
566         }
567         init_port_config();
568
569         /*
570          * Record which mbuf pool each logical core should use, if needed.
571          */
572         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
573                 mbp = mbuf_pool_find(
574                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
575
576                 if (mbp == NULL)
577                         mbp = mbuf_pool_find(0);
578                 fwd_lcores[lc_id]->mbp = mbp;
579         }
580
581         /* Configuration of packet forwarding streams. */
582         if (init_fwd_streams() < 0)
583                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
584 }
585
586
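/*
 * Re-read the device info of a port, record its socket and mark the port
 * and its queues for reconfiguration.
 */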
587 void
588 reconfig(portid_t new_port_id, unsigned socket_id)
589 {
590         struct rte_port *port;
591
592         /* Reconfiguration of Ethernet ports. */
593         port = &ports[new_port_id];
594         rte_eth_dev_info_get(new_port_id, &port->dev_info);
595
596         /* set flag to initialize port/queue */
597         port->need_reconfig = 1;
598         port->need_reconfig_queues = 1;
599         port->socket_id = socket_id;
600
601         init_port_config();
602 }
603
604
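/*
 * Allocate the forwarding streams: nb_ports * max(nb_rxq, nb_txq) streams,
 * after checking that each port supports the configured numbers of RX/TX
 * queues.
 */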
605 int
606 init_fwd_streams(void)
607 {
608         portid_t pid;
609         struct rte_port *port;
610         streamid_t sm_id, nb_fwd_streams_new;
611         queueid_t q;
612
613         /* Set the socket ID of each port according to the NUMA configuration. */
614         FOREACH_PORT(pid, ports) {
615                 port = &ports[pid];
616                 if (nb_rxq > port->dev_info.max_rx_queues) {
617                         printf("Fail: nb_rxq(%d) is greater than "
618                                 "max_rx_queues(%d)\n", nb_rxq,
619                                 port->dev_info.max_rx_queues);
620                         return -1;
621                 }
622                 if (nb_txq > port->dev_info.max_tx_queues) {
623                         printf("Fail: nb_txq(%d) is greater than "
624                                 "max_tx_queues(%d)\n", nb_txq,
625                                 port->dev_info.max_tx_queues);
626                         return -1;
627                 }
628                 if (numa_support) {
629                         if (port_numa[pid] != NUMA_NO_CONFIG)
630                                 port->socket_id = port_numa[pid];
631                         else {
632                                 port->socket_id = rte_eth_dev_socket_id(pid);
633
634                                 /* if socket_id is invalid, set to 0 */
635                                 if (check_socket_id(port->socket_id) < 0)
636                                         port->socket_id = 0;
637                         }
638                 }
639                 else {
640                         if (socket_num == UMA_NO_CONFIG)
641                                 port->socket_id = 0;
642                         else
643                                 port->socket_id = socket_num;
644                 }
645         }
646
647         q = RTE_MAX(nb_rxq, nb_txq);
648         if (q == 0) {
649                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
650                 return -1;
651         }
652         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
653         if (nb_fwd_streams_new == nb_fwd_streams)
654                 return 0;
655         /* clear the old */
656         if (fwd_streams != NULL) {
657                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
658                         if (fwd_streams[sm_id] == NULL)
659                                 continue;
660                         rte_free(fwd_streams[sm_id]);
661                         fwd_streams[sm_id] = NULL;
662                 }
663                 rte_free(fwd_streams);
664                 fwd_streams = NULL;
665         }
666
667         /* init new */
668         nb_fwd_streams = nb_fwd_streams_new;
669         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
670                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
671         if (fwd_streams == NULL)
672                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
673                                                 "failed\n", nb_fwd_streams);
674
675         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
676                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
677                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
678                 if (fwd_streams[sm_id] == NULL)
679                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
680                                                                 " failed\n");
681         }
682
683         return 0;
684 }
685
686 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
687 static void
688 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
689 {
690         unsigned int total_burst;
691         unsigned int nb_burst;
692         unsigned int burst_stats[3];
693         uint16_t pktnb_stats[3];
694         uint16_t nb_pkt;
695         int burst_percent[3];
696
697         /*
698          * First compute the total number of packet bursts and the
699          * two highest numbers of bursts of the same number of packets.
700          */
701         total_burst = 0;
702         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
703         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
704         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
705                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
706                 if (nb_burst == 0)
707                         continue;
708                 total_burst += nb_burst;
709                 if (nb_burst > burst_stats[0]) {
710                         burst_stats[1] = burst_stats[0];
711                         pktnb_stats[1] = pktnb_stats[0];
712                         burst_stats[0] = nb_burst;
713                         pktnb_stats[0] = nb_pkt;
714                 }
715         }
716         if (total_burst == 0)
717                 return;
718         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
719         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
720                burst_percent[0], (int) pktnb_stats[0]);
721         if (burst_stats[0] == total_burst) {
722                 printf("]\n");
723                 return;
724         }
725         if (burst_stats[0] + burst_stats[1] == total_burst) {
726                 printf(" + %d%% of %d pkts]\n",
727                        100 - burst_percent[0], pktnb_stats[1]);
728                 return;
729         }
730         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
731         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
732         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
733                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
734                 return;
735         }
736         printf(" + %d%% of %d pkts + %d%% of others]\n",
737                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
738 }
739 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
740
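/*
 * Display the forwarding statistics of a port.
 */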
741 static void
742 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
743 {
744         struct rte_port *port;
745         uint8_t i;
746
747         static const char *fwd_stats_border = "----------------------";
748
749         port = &ports[port_id];
750         printf("\n  %s Forward statistics for port %-2d %s\n",
751                fwd_stats_border, port_id, fwd_stats_border);
752
753         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
754                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
755                        "%-"PRIu64"\n",
756                        stats->ipackets, stats->imissed,
757                        (uint64_t) (stats->ipackets + stats->imissed));
758
759                 if (cur_fwd_eng == &csum_fwd_engine)
760                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
761                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
762                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
763                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
764                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
765                 }
766
767                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
768                        "%-"PRIu64"\n",
769                        stats->opackets, port->tx_dropped,
770                        (uint64_t) (stats->opackets + port->tx_dropped));
771         }
772         else {
773                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
774                        "%14"PRIu64"\n",
775                        stats->ipackets, stats->imissed,
776                        (uint64_t) (stats->ipackets + stats->imissed));
777
778                 if (cur_fwd_eng == &csum_fwd_engine)
779                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
780                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
781                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
782                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
783                         printf("  RX-nombufs:             %14"PRIu64"\n",
784                                stats->rx_nombuf);
785                 }
786
787                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
788                        "%14"PRIu64"\n",
789                        stats->opackets, port->tx_dropped,
790                        (uint64_t) (stats->opackets + port->tx_dropped));
791         }
792
793 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
794         if (port->rx_stream)
795                 pkt_burst_stats_display("RX",
796                         &port->rx_stream->rx_burst_stats);
797         if (port->tx_stream)
798                 pkt_burst_stats_display("TX",
799                         &port->tx_stream->tx_burst_stats);
800 #endif
801
802         if (port->rx_queue_stats_mapping_enabled) {
803                 printf("\n");
804                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
805                         printf("  Stats reg %2d RX-packets:%14"PRIu64
806                                "     RX-errors:%14"PRIu64
807                                "    RX-bytes:%14"PRIu64"\n",
808                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
809                 }
810                 printf("\n");
811         }
812         if (port->tx_queue_stats_mapping_enabled) {
813                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
814                         printf("  Stats reg %2d TX-packets:%14"PRIu64
815                                "                                 TX-bytes:%14"PRIu64"\n",
816                                i, stats->q_opackets[i], stats->q_obytes[i]);
817                 }
818         }
819
820         printf("  %s--------------------------------%s\n",
821                fwd_stats_border, fwd_stats_border);
822 }
823
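/*
 * Display the statistics of a forwarding stream; streams that carried no
 * traffic are skipped.
 */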
824 static void
825 fwd_stream_stats_display(streamid_t stream_id)
826 {
827         struct fwd_stream *fs;
828         static const char *fwd_top_stats_border = "-------";
829
830         fs = fwd_streams[stream_id];
831         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
832             (fs->fwd_dropped == 0))
833                 return;
834         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
835                "TX Port=%2d/Queue=%2d %s\n",
836                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
837                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
838         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
839                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
840
841         /* if checksum mode */
842         if (cur_fwd_eng == &csum_fwd_engine) {
843                printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
844                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
845         }
846
847 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
848         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
849         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
850 #endif
851 }
852
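/*
 * Flush the RX queues of the forwarding ports: receive and free any packets
 * left over from a previous run.
 */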
853 static void
854 flush_fwd_rx_queues(void)
855 {
856         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
857         portid_t  rxp;
858         portid_t port_id;
859         queueid_t rxq;
860         uint16_t  nb_rx;
861         uint16_t  i;
862         uint8_t   j;
863
864         for (j = 0; j < 2; j++) {
865                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
866                         for (rxq = 0; rxq < nb_rxq; rxq++) {
867                                 port_id = fwd_ports_ids[rxp];
868                                 do {
869                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
870                                                 pkts_burst, MAX_PKT_BURST);
871                                         for (i = 0; i < nb_rx; i++)
872                                                 rte_pktmbuf_free(pkts_burst[i]);
873                                 } while (nb_rx > 0);
874                         }
875                 }
876                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
877         }
878 }
879
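/*
 * Run the given packet forwarding function on all streams assigned to a
 * logical core until the core is asked to stop.
 */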
880 static void
881 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
882 {
883         struct fwd_stream **fsm;
884         streamid_t nb_fs;
885         streamid_t sm_id;
886
887         fsm = &fwd_streams[fc->stream_idx];
888         nb_fs = fc->stream_nb;
889         do {
890                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
891                         (*pkt_fwd)(fsm[sm_id]);
892         } while (! fc->stopped);
893 }
894
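/*
 * Logical core entry point: run the packet forwarding engine of the current
 * configuration on the streams of this core.
 */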
895 static int
896 start_pkt_forward_on_core(void *fwd_arg)
897 {
898         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
899                              cur_fwd_config.fwd_eng->packet_fwd);
900         return 0;
901 }
902
903 /*
904  * Run the TXONLY packet forwarding engine to send a single burst of packets.
905  * Used to start communication flows in network loopback test configurations.
906  */
907 static int
908 run_one_txonly_burst_on_core(void *fwd_arg)
909 {
910         struct fwd_lcore *fwd_lc;
911         struct fwd_lcore tmp_lcore;
912
913         fwd_lc = (struct fwd_lcore *) fwd_arg;
914         tmp_lcore = *fwd_lc;
915         tmp_lcore.stopped = 1;
916         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
917         return 0;
918 }
919
920 /*
921  * Launch packet forwarding:
922  *     - Setup per-port forwarding context.
923  *     - launch logical cores with their forwarding configuration.
924  */
925 static void
926 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
927 {
928         port_fwd_begin_t port_fwd_begin;
929         unsigned int i;
930         unsigned int lc_id;
931         int diag;
932
933         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
934         if (port_fwd_begin != NULL) {
935                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
936                         (*port_fwd_begin)(fwd_ports_ids[i]);
937         }
938         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
939                 lc_id = fwd_lcores_cpuids[i];
940                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
941                         fwd_lcores[i]->stopped = 0;
942                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
943                                                      fwd_lcores[i], lc_id);
944                         if (diag != 0)
945                                 printf("launch lcore %u failed - diag=%d\n",
946                                        lc_id, diag);
947                 }
948         }
949 }
950
951 /*
952  * Launch packet forwarding configuration.
953  */
954 void
955 start_packet_forwarding(int with_tx_first)
956 {
957         port_fwd_begin_t port_fwd_begin;
958         port_fwd_end_t  port_fwd_end;
959         struct rte_port *port;
960         unsigned int i;
961         portid_t   pt_id;
962         streamid_t sm_id;
963
964         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
965                 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
966
967         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
968                 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
969
970         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
971                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
972                 (!nb_rxq || !nb_txq))
973                 rte_exit(EXIT_FAILURE,
974                         "Either rxq or txq are 0, cannot use %s fwd mode\n",
975                         cur_fwd_eng->fwd_mode_name);
976
977         if (all_ports_started() == 0) {
978                 printf("Not all ports were started\n");
979                 return;
980         }
981         if (test_done == 0) {
982                 printf("Packet forwarding already started\n");
983                 return;
984         }
985         if(dcb_test) {
986                 for (i = 0; i < nb_fwd_ports; i++) {
987                         pt_id = fwd_ports_ids[i];
988                         port = &ports[pt_id];
989                         if (!port->dcb_flag) {
990                                 printf("In DCB mode, all forwarding ports must "
991                                        "be configured in this mode.\n");
992                                 return;
993                         }
994                 }
995                 if (nb_fwd_lcores == 1) {
996                         printf("In DCB mode, the number of forwarding cores "
997                                "should be larger than 1.\n");
998                         return;
999                 }
1000         }
1001         test_done = 0;
1002
1003         if(!no_flush_rx)
1004                 flush_fwd_rx_queues();
1005
1006         fwd_config_setup();
1007         rxtx_config_display();
1008
1009         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1010                 pt_id = fwd_ports_ids[i];
1011                 port = &ports[pt_id];
1012                 rte_eth_stats_get(pt_id, &port->stats);
1013                 port->tx_dropped = 0;
1014
1015                 map_port_queue_stats_mapping_registers(pt_id, port);
1016         }
1017         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1018                 fwd_streams[sm_id]->rx_packets = 0;
1019                 fwd_streams[sm_id]->tx_packets = 0;
1020                 fwd_streams[sm_id]->fwd_dropped = 0;
1021                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1022                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1023
1024 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1025                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1026                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1027                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1028                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1029 #endif
1030 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1031                 fwd_streams[sm_id]->core_cycles = 0;
1032 #endif
1033         }
1034         if (with_tx_first) {
1035                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1036                 if (port_fwd_begin != NULL) {
1037                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1038                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1039                 }
1040                 launch_packet_forwarding(run_one_txonly_burst_on_core);
1041                 rte_eal_mp_wait_lcore();
1042                 port_fwd_end = tx_only_engine.port_fwd_end;
1043                 if (port_fwd_end != NULL) {
1044                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1045                                 (*port_fwd_end)(fwd_ports_ids[i]);
1046                 }
1047         }
1048         launch_packet_forwarding(start_pkt_forward_on_core);
1049 }
1050
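/*
 * Stop packet forwarding: wait for the forwarding lcores to finish,
 * accumulate the per-stream statistics into the ports and display the
 * forwarding statistics of each port.
 */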
1051 void
1052 stop_packet_forwarding(void)
1053 {
1054         struct rte_eth_stats stats;
1055         struct rte_port *port;
1056         port_fwd_end_t  port_fwd_end;
1057         int i;
1058         portid_t   pt_id;
1059         streamid_t sm_id;
1060         lcoreid_t  lc_id;
1061         uint64_t total_recv;
1062         uint64_t total_xmit;
1063         uint64_t total_rx_dropped;
1064         uint64_t total_tx_dropped;
1065         uint64_t total_rx_nombuf;
1066         uint64_t tx_dropped;
1067         uint64_t rx_bad_ip_csum;
1068         uint64_t rx_bad_l4_csum;
1069 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1070         uint64_t fwd_cycles;
1071 #endif
1072         static const char *acc_stats_border = "+++++++++++++++";
1073
1074         if (all_ports_started() == 0) {
1075                 printf("Not all ports were started\n");
1076                 return;
1077         }
1078         if (test_done) {
1079                 printf("Packet forwarding not started\n");
1080                 return;
1081         }
1082         printf("Telling cores to stop...");
1083         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1084                 fwd_lcores[lc_id]->stopped = 1;
1085         printf("\nWaiting for lcores to finish...\n");
1086         rte_eal_mp_wait_lcore();
1087         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1088         if (port_fwd_end != NULL) {
1089                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1090                         pt_id = fwd_ports_ids[i];
1091                         (*port_fwd_end)(pt_id);
1092                 }
1093         }
1094 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1095         fwd_cycles = 0;
1096 #endif
1097         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1098                 if (cur_fwd_config.nb_fwd_streams >
1099                     cur_fwd_config.nb_fwd_ports) {
1100                         fwd_stream_stats_display(sm_id);
1101                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1102                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1103                 } else {
1104                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1105                                 fwd_streams[sm_id];
1106                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1107                                 fwd_streams[sm_id];
1108                 }
1109                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1110                 tx_dropped = (uint64_t) (tx_dropped +
1111                                          fwd_streams[sm_id]->fwd_dropped);
1112                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1113
1114                 rx_bad_ip_csum =
1115                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1116                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1117                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1118                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1119                                                         rx_bad_ip_csum;
1120
1121                 rx_bad_l4_csum =
1122                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1123                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1124                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1125                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1126                                                         rx_bad_l4_csum;
1127
1128 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1129                 fwd_cycles = (uint64_t) (fwd_cycles +
1130                                          fwd_streams[sm_id]->core_cycles);
1131 #endif
1132         }
1133         total_recv = 0;
1134         total_xmit = 0;
1135         total_rx_dropped = 0;
1136         total_tx_dropped = 0;
1137         total_rx_nombuf  = 0;
1138         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1139                 pt_id = fwd_ports_ids[i];
1140
1141                 port = &ports[pt_id];
1142                 rte_eth_stats_get(pt_id, &stats);
1143                 stats.ipackets -= port->stats.ipackets;
1144                 port->stats.ipackets = 0;
1145                 stats.opackets -= port->stats.opackets;
1146                 port->stats.opackets = 0;
1147                 stats.ibytes   -= port->stats.ibytes;
1148                 port->stats.ibytes = 0;
1149                 stats.obytes   -= port->stats.obytes;
1150                 port->stats.obytes = 0;
1151                 stats.imissed  -= port->stats.imissed;
1152                 port->stats.imissed = 0;
1153                 stats.oerrors  -= port->stats.oerrors;
1154                 port->stats.oerrors = 0;
1155                 stats.rx_nombuf -= port->stats.rx_nombuf;
1156                 port->stats.rx_nombuf = 0;
1157
1158                 total_recv += stats.ipackets;
1159                 total_xmit += stats.opackets;
1160                 total_rx_dropped += stats.imissed;
1161                 total_tx_dropped += port->tx_dropped;
1162                 total_rx_nombuf  += stats.rx_nombuf;
1163
1164                 fwd_port_stats_display(pt_id, &stats);
1165         }
1166         printf("\n  %s Accumulated forward statistics for all ports "
1167                "%s\n",
1168                acc_stats_border, acc_stats_border);
1169         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1170                "%-"PRIu64"\n"
1171                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1172                "%-"PRIu64"\n",
1173                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1174                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1175         if (total_rx_nombuf > 0)
1176                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1177         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1178                "%s\n",
1179                acc_stats_border, acc_stats_border);
1180 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1181         if (total_recv > 0)
1182                 printf("\n  CPU cycles/packet=%u (total cycles="
1183                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1184                        (unsigned int)(fwd_cycles / total_recv),
1185                        fwd_cycles, total_recv);
1186 #endif
1187         printf("\nDone.\n");
1188         test_done = 1;
1189 }
1190
1191 void
1192 dev_set_link_up(portid_t pid)
1193 {
1194         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1195                 printf("\nSet link up fail.\n");
1196 }
1197
1198 void
1199 dev_set_link_down(portid_t pid)
1200 {
1201         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1202                 printf("\nSet link down fail.\n");
1203 }
1204
1205 static int
1206 all_ports_started(void)
1207 {
1208         portid_t pi;
1209         struct rte_port *port;
1210
1211         FOREACH_PORT(pi, ports) {
1212                 port = &ports[pi];
1213                 /* Check if there is a port which is not started */
1214                 if ((port->port_status != RTE_PORT_STARTED) &&
1215                         (port->slave_flag == 0))
1216                         return 0;
1217         }
1218
1219         /* All ports are started */
1220         return 1;
1221 }
1222
1223 int
1224 all_ports_stopped(void)
1225 {
1226         portid_t pi;
1227         struct rte_port *port;
1228
1229         FOREACH_PORT(pi, ports) {
1230                 port = &ports[pi];
1231                 if ((port->port_status != RTE_PORT_STOPPED) &&
1232                         (port->slave_flag == 0))
1233                         return 0;
1234         }
1235
1236         return 1;
1237 }
1238
1239 int
1240 port_is_started(portid_t port_id)
1241 {
1242         if (port_id_is_invalid(port_id, ENABLED_WARN))
1243                 return 0;
1244
1245         if (ports[port_id].port_status != RTE_PORT_STARTED)
1246                 return 0;
1247
1248         return 1;
1249 }
1250
1251 static int
1252 port_is_closed(portid_t port_id)
1253 {
1254         if (port_id_is_invalid(port_id, ENABLED_WARN))
1255                 return 0;
1256
1257         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1258                 return 0;
1259
1260         return 1;
1261 }
1262
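/*
 * Start a port, or all ports when pid is RTE_PORT_ALL. Ports that need it
 * are (re)configured, including their RX/TX queues, before being started.
 */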
1263 int
1264 start_port(portid_t pid)
1265 {
1266         int diag, need_check_link_status = -1;
1267         portid_t pi;
1268         queueid_t qi;
1269         struct rte_port *port;
1270         struct ether_addr mac_addr;
1271
1272         if (test_done == 0) {
1273                 printf("Please stop forwarding first\n");
1274                 return -1;
1275         }
1276
1277         if (port_id_is_invalid(pid, ENABLED_WARN))
1278                 return 0;
1279
1280         if (init_fwd_streams() < 0) {
1281                 printf("Fail from init_fwd_streams()\n");
1282                 return -1;
1283         }
1284
1285         if(dcb_config)
1286                 dcb_test = 1;
1287         FOREACH_PORT(pi, ports) {
1288                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1289                         continue;
1290
1291                 need_check_link_status = 0;
1292                 port = &ports[pi];
1293                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1294                                                  RTE_PORT_HANDLING) == 0) {
1295                         printf("Port %d is now not stopped\n", pi);
1296                         continue;
1297                 }
1298
1299                 if (port->need_reconfig > 0) {
1300                         port->need_reconfig = 0;
1301
1302                         printf("Configuring Port %d (socket %u)\n", pi,
1303                                         port->socket_id);
1304                         /* configure port */
1305                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1306                                                 &(port->dev_conf));
1307                         if (diag != 0) {
1308                                 if (rte_atomic16_cmpset(&(port->port_status),
1309                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1310                                         printf("Port %d can not be set back "
1311                                                         "to stopped\n", pi);
1312                                 printf("Fail to configure port %d\n", pi);
1313                                 /* try to reconfigure port next time */
1314                                 port->need_reconfig = 1;
1315                                 return -1;
1316                         }
1317                 }
1318                 if (port->need_reconfig_queues > 0) {
1319                         port->need_reconfig_queues = 0;
1320                         /* setup tx queues */
1321                         for (qi = 0; qi < nb_txq; qi++) {
1322                                 if ((numa_support) &&
1323                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1324                                         diag = rte_eth_tx_queue_setup(pi, qi,
1325                                                 nb_txd,txring_numa[pi],
1326                                                 &(port->tx_conf));
1327                                 else
1328                                         diag = rte_eth_tx_queue_setup(pi, qi,
1329                                                 nb_txd,port->socket_id,
1330                                                 &(port->tx_conf));
1331
1332                                 if (diag == 0)
1333                                         continue;
1334
1335                                 /* Fail to setup tx queue, return */
1336                                 if (rte_atomic16_cmpset(&(port->port_status),
1337                                                         RTE_PORT_HANDLING,
1338                                                         RTE_PORT_STOPPED) == 0)
1339                                         printf("Port %d can not be set back "
1340                                                         "to stopped\n", pi);
1341                                 printf("Fail to configure port %d tx queues\n", pi);
1342                                 /* try to reconfigure queues next time */
1343                                 port->need_reconfig_queues = 1;
1344                                 return -1;
1345                         }
1346                         /* setup rx queues */
1347                         for (qi = 0; qi < nb_rxq; qi++) {
1348                                 if ((numa_support) &&
1349                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1350                                         struct rte_mempool * mp =
1351                                                 mbuf_pool_find(rxring_numa[pi]);
1352                                         if (mp == NULL) {
1353                                                 printf("Failed to setup RX queue: "
1354                                                         "No mempool allocation "
1355                                                         "on the socket %d\n",
1356                                                         rxring_numa[pi]);
1357                                                 return -1;
1358                                         }
1359
1360                                         diag = rte_eth_rx_queue_setup(pi, qi,
1361                                              nb_rxd,rxring_numa[pi],
1362                                              &(port->rx_conf),mp);
1363                                 }
1364                                 else
1365                                         diag = rte_eth_rx_queue_setup(pi, qi,
1366                                              nb_rxd,port->socket_id,
1367                                              &(port->rx_conf),
1368                                              mbuf_pool_find(port->socket_id));
1369
1370                                 if (diag == 0)
1371                                         continue;
1372
1373
1374                                 /* Fail to setup rx queue, return */
1375                                 if (rte_atomic16_cmpset(&(port->port_status),
1376                                                         RTE_PORT_HANDLING,
1377                                                         RTE_PORT_STOPPED) == 0)
1378                                         printf("Port %d can not be set back "
1379                                                         "to stopped\n", pi);
1380                                 printf("Fail to configure port %d rx queues\n", pi);
1381                                 /* try to reconfigure queues next time */
1382                                 port->need_reconfig_queues = 1;
1383                                 return -1;
1384                         }
1385                 }
1386                 /* start port */
1387                 if (rte_eth_dev_start(pi) < 0) {
1388                         printf("Fail to start port %d\n", pi);
1389
1390                         /* Fail to setup rx queue, return */
1391                         if (rte_atomic16_cmpset(&(port->port_status),
1392                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1393                                 printf("Port %d can not be set back to "
1394                                                         "stopped\n", pi);
1395                         continue;
1396                 }
1397
1398                 if (rte_atomic16_cmpset(&(port->port_status),
1399                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1400                         printf("Port %d can not be set into started\n", pi);
1401
1402                 rte_eth_macaddr_get(pi, &mac_addr);
1403                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1404                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1405                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1406                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1407
1408                 /* at least one port started, so the link status needs to be checked */
1409                 need_check_link_status = 1;
1410         }
1411
1412         if (need_check_link_status == 1 && !no_link_check)
1413                 check_all_ports_link_status(RTE_PORT_ALL);
1414         else if (need_check_link_status == 0)
1415                 printf("Please stop the ports first\n");
1416
1417         printf("Done\n");
1418         return 0;
1419 }
1420
1421 void
1422 stop_port(portid_t pid)
1423 {
1424         portid_t pi;
1425         struct rte_port *port;
1426         int need_check_link_status = 0;
1427
1428         if (test_done == 0) {
1429                 printf("Please stop forwarding first\n");
1430                 return;
1431         }
1432         if (dcb_test) {
1433                 dcb_test = 0;
1434                 dcb_config = 0;
1435         }
1436
1437         if (port_id_is_invalid(pid, ENABLED_WARN))
1438                 return;
1439
1440         printf("Stopping ports...\n");
1441
1442         FOREACH_PORT(pi, ports) {
1443                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1444                         continue;
1445
1446                 port = &ports[pi];
1447                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1448                                                 RTE_PORT_HANDLING) == 0)
1449                         continue;
1450
1451                 rte_eth_dev_stop(pi);
1452
1453                 if (rte_atomic16_cmpset(&(port->port_status),
1454                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1455                         printf("Port %d cannot be set to stopped\n", pi);
1456                 need_check_link_status = 1;
1457         }
1458         if (need_check_link_status && !no_link_check)
1459                 check_all_ports_link_status(RTE_PORT_ALL);
1460
1461         printf("Done\n");
1462 }
1463
1464 void
1465 close_port(portid_t pid)
1466 {
1467         portid_t pi;
1468         struct rte_port *port;
1469
1470         if (test_done == 0) {
1471                 printf("Please stop forwarding first\n");
1472                 return;
1473         }
1474
1475         if (port_id_is_invalid(pid, ENABLED_WARN))
1476                 return;
1477
1478         printf("Closing ports...\n");
1479
1480         FOREACH_PORT(pi, ports) {
1481                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1482                         continue;
1483
1484                 port = &ports[pi];
1485                 if (rte_atomic16_cmpset(&(port->port_status),
1486                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1487                         printf("Port %d is already closed\n", pi);
1488                         continue;
1489                 }
1490
1491                 if (rte_atomic16_cmpset(&(port->port_status),
1492                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1493                         printf("Port %d is not stopped\n", pi);
1494                         continue;
1495                 }
1496
1497                 rte_eth_dev_close(pi);
1498
1499                 if (rte_atomic16_cmpset(&(port->port_status),
1500                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1501                         printf("Port %d cannot be set to closed\n", pi);
1502         }
1503
1504         printf("Done\n");
1505 }
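/*
 * Illustrative sketch, not part of the original file: how a caller is
 * expected to drive the stop/close helpers above once forwarding has been
 * halted.  Port id 0 is an assumption made for the example.
 *
 *     stop_packet_forwarding();
 *     stop_port(0);    // RTE_PORT_STARTED -> RTE_PORT_STOPPED
 *     close_port(0);   // RTE_PORT_STOPPED -> RTE_PORT_CLOSED
 */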
1506
1507 void
1508 attach_port(char *identifier)
1509 {
1510         portid_t i, j, pi = 0;
1511
1512         printf("Attaching a new port...\n");
1513
1514         if (identifier == NULL) {
1515                 printf("Invalid parameters are specified\n");
1516                 return;
1517         }
1518
1519         if (test_done == 0) {
1520                 printf("Please stop forwarding first\n");
1521                 return;
1522         }
1523
1524         if (rte_eth_dev_attach(identifier, &pi))
1525                 return;
1526
1527         ports[pi].enabled = 1;
1528         reconfig(pi, rte_eth_dev_socket_id(pi));
1529         rte_eth_promiscuous_enable(pi);
1530
1531         nb_ports = rte_eth_dev_count();
1532
1533         /* set_default_fwd_ports_config(); */
1534         memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids));
1535         i = 0;
1536         FOREACH_PORT(j, ports) {
1537                 fwd_ports_ids[i] = j;
1538                 i++;
1539         }
1540         nb_cfg_ports = nb_ports;
1541         nb_fwd_ports++;
1542
1543         ports[pi].port_status = RTE_PORT_STOPPED;
1544
1545         printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1546         printf("Done\n");
1547 }
1548
1549 void
1550 detach_port(uint8_t port_id)
1551 {
1552         portid_t i, pi = 0;
1553         char name[RTE_ETH_NAME_MAX_LEN];
1554
1555         printf("Detaching a port...\n");
1556
1557         if (!port_is_closed(port_id)) {
1558                 printf("Please close port first\n");
1559                 return;
1560         }
1561
1562         if (rte_eth_dev_detach(port_id, name))
1563                 return;
1564
1565         ports[port_id].enabled = 0;
1566         nb_ports = rte_eth_dev_count();
1567
1568         /* set_default_fwd_ports_config(); */
1569         memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids));
1570         i = 0;
1571         FOREACH_PORT(pi, ports) {
1572                 fwd_ports_ids[i] = pi;
1573                 i++;
1574         }
1575         nb_cfg_ports = nb_ports;
1576         nb_fwd_ports--;
1577
1578         printf("Port '%s' is detached. Total number of ports is now %d\n",
1579                         name, nb_ports);
1580         printf("Done\n");
1581         return;
1582 }
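/*
 * Illustrative sketch, not from the original file: the ethdev hotplug calls
 * wrapped by attach_port()/detach_port() above.  The PCI address used as the
 * devargs string is a made-up example.
 *
 *     uint8_t port_id;
 *     char name[RTE_ETH_NAME_MAX_LEN];
 *
 *     if (rte_eth_dev_attach("0000:02:00.0", &port_id) == 0) {
 *             // ... configure, start and use port_id ...
 *             rte_eth_dev_stop(port_id);
 *             rte_eth_dev_close(port_id);
 *             rte_eth_dev_detach(port_id, name);
 *     }
 */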
1583
1584 void
1585 pmd_test_exit(void)
1586 {
1587         portid_t pt_id;
1588
1589         if (test_done == 0)
1590                 stop_packet_forwarding();
1591
1592         if (ports != NULL) {
1593                 no_link_check = 1;
1594                 FOREACH_PORT(pt_id, ports) {
1595                         printf("\nShutting down port %d...\n", pt_id);
1596                         fflush(stdout);
1597                         stop_port(pt_id);
1598                         close_port(pt_id);
1599                 }
1600         }
1601         printf("\nBye...\n");
1602 }
1603
1604 typedef void (*cmd_func_t)(void);
1605 struct pmd_test_command {
1606         const char *cmd_name;
1607         cmd_func_t cmd_func;
1608 };
1609
1610 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1611
1612 /* Check the link status of all ports for up to 9 seconds, then print it */
1613 static void
1614 check_all_ports_link_status(uint32_t port_mask)
1615 {
1616 #define CHECK_INTERVAL 100 /* 100ms */
1617 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1618         uint8_t portid, count, all_ports_up, print_flag = 0;
1619         struct rte_eth_link link;
1620
1621         printf("Checking link statuses...\n");
1622         fflush(stdout);
1623         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1624                 all_ports_up = 1;
1625                 FOREACH_PORT(portid, ports) {
1626                         if ((port_mask & (1 << portid)) == 0)
1627                                 continue;
1628                         memset(&link, 0, sizeof(link));
1629                         rte_eth_link_get_nowait(portid, &link);
1630                         /* print link status if flag set */
1631                         if (print_flag == 1) {
1632                                 if (link.link_status)
1633                                         printf("Port %d Link Up - speed %u "
1634                                                 "Mbps - %s\n", (uint8_t)portid,
1635                                                 (unsigned)link.link_speed,
1636                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1637                                         ("full-duplex") : ("half-duplex"));
1638                                 else
1639                                         printf("Port %d Link Down\n",
1640                                                 (uint8_t)portid);
1641                                 continue;
1642                         }
1643                         /* clear all_ports_up flag if any link down */
1644                         if (link.link_status == 0) {
1645                                 all_ports_up = 0;
1646                                 break;
1647                         }
1648                 }
1649                 /* after finally printing all link status, get out */
1650                 if (print_flag == 1)
1651                         break;
1652
1653                 if (all_ports_up == 0) {
1654                         fflush(stdout);
1655                         rte_delay_ms(CHECK_INTERVAL);
1656                 }
1657
1658                 /* set the print_flag if all ports up or timeout */
1659                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1660                         print_flag = 1;
1661                 }
1662         }
1663 }
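/*
 * Minimal sketch of the polling pattern used above, reduced to a single port
 * (port id 0 is an assumption): query the link without blocking, retry every
 * CHECK_INTERVAL ms and give up after MAX_CHECK_TIME iterations.
 *
 *     struct rte_eth_link link;
 *     uint8_t count;
 *
 *     for (count = 0; count < MAX_CHECK_TIME; count++) {
 *             memset(&link, 0, sizeof(link));
 *             rte_eth_link_get_nowait(0, &link);
 *             if (link.link_status)
 *                     break;
 *             rte_delay_ms(CHECK_INTERVAL);
 *     }
 */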
1664
1665 static int
1666 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1667 {
1668         uint16_t i;
1669         int diag;
1670         uint8_t mapping_found = 0;
1671
1672         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1673                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1674                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1675                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1676                                         tx_queue_stats_mappings[i].queue_id,
1677                                         tx_queue_stats_mappings[i].stats_counter_id);
1678                         if (diag != 0)
1679                                 return diag;
1680                         mapping_found = 1;
1681                 }
1682         }
1683         if (mapping_found)
1684                 port->tx_queue_stats_mapping_enabled = 1;
1685         return 0;
1686 }
1687
1688 static int
1689 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1690 {
1691         uint16_t i;
1692         int diag;
1693         uint8_t mapping_found = 0;
1694
1695         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1696                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1697                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1698                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1699                                         rx_queue_stats_mappings[i].queue_id,
1700                                         rx_queue_stats_mappings[i].stats_counter_id);
1701                         if (diag != 0)
1702                                 return diag;
1703                         mapping_found = 1;
1704                 }
1705         }
1706         if (mapping_found)
1707                 port->rx_queue_stats_mapping_enabled = 1;
1708         return 0;
1709 }
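/*
 * Illustrative sketch (port, queue and counter ids are assumptions): what a
 * single mapping entry handled by the loops above does.  RX queue 2 of port 0
 * is bound to per-queue stats counter 5, which can then be read back through
 * the generic stats API.
 *
 *     struct rte_eth_stats stats;
 *
 *     if (rte_eth_dev_set_rx_queue_stats_mapping(0, 2, 5) == 0) {
 *             rte_eth_stats_get(0, &stats);
 *             printf("rx queue 2: %" PRIu64 " packets\n", stats.q_ipackets[5]);
 *     }
 */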
1710
1711 static void
1712 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1713 {
1714         int diag = 0;
1715
1716         diag = set_tx_queue_stats_mapping_registers(pi, port);
1717         if (diag != 0) {
1718                 if (diag == -ENOTSUP) {
1719                         port->tx_queue_stats_mapping_enabled = 0;
1720                         printf("TX queue stats mapping not supported for port id=%d\n", pi);
1721                 }
1722                 else
1723                         rte_exit(EXIT_FAILURE,
1724                                         "set_tx_queue_stats_mapping_registers "
1725                                         "failed for port id=%d diag=%d\n",
1726                                         pi, diag);
1727         }
1728
1729         diag = set_rx_queue_stats_mapping_registers(pi, port);
1730         if (diag != 0) {
1731                 if (diag == -ENOTSUP) {
1732                         port->rx_queue_stats_mapping_enabled = 0;
1733                         printf("RX queue stats mapping not supported for port id=%d\n", pi);
1734                 }
1735                 else
1736                         rte_exit(EXIT_FAILURE,
1737                                         "set_rx_queue_stats_mapping_registers "
1738                                         "failed for port id=%d diag=%d\n",
1739                                         pi, diag);
1740         }
1741 }
1742
1743 static void
1744 rxtx_port_config(struct rte_port *port)
1745 {
1746         port->rx_conf = port->dev_info.default_rxconf;
1747         port->tx_conf = port->dev_info.default_txconf;
1748
1749         /* Check if any RX/TX parameters have been passed */
1750         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1751                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1752
1753         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1754                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1755
1756         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1757                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1758
1759         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1760                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1761
1762         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1763                 port->rx_conf.rx_drop_en = rx_drop_en;
1764
1765         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1766                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1767
1768         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1769                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1770
1771         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1772                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1773
1774         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1775                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1776
1777         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1778                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1779
1780         if (txq_flags != RTE_PMD_PARAM_UNSET)
1781                 port->tx_conf.txq_flags = txq_flags;
1782 }
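/*
 * Sketch of how the configuration filled in above is consumed (port and queue
 * ids are assumptions): start_port() hands the per-port rx_conf/tx_conf to
 * the queue setup calls.
 *
 *     struct rte_port *port = &ports[0];
 *
 *     rxtx_port_config(port);
 *     rte_eth_tx_queue_setup(0, 0, nb_txd, port->socket_id, &port->tx_conf);
 *     rte_eth_rx_queue_setup(0, 0, nb_rxd, port->socket_id, &port->rx_conf,
 *                            mbuf_pool_find(port->socket_id));
 */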
1783
1784 void
1785 init_port_config(void)
1786 {
1787         portid_t pid;
1788         struct rte_port *port;
1789
1790         FOREACH_PORT(pid, ports) {
1791                 port = &ports[pid];
1792                 port->dev_conf.rxmode = rx_mode;
1793                 port->dev_conf.fdir_conf = fdir_conf;
1794                 if (nb_rxq > 1) {
1795                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1796                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1797                 } else {
1798                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1799                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1800                 }
1801
1802                 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1803                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1804                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1805                         else
1806                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1807                 }
1808
1809                 if (port->dev_info.max_vfs != 0) {
1810                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1811                                 port->dev_conf.rxmode.mq_mode =
1812                                         ETH_MQ_RX_VMDQ_RSS;
1813                         else
1814                                 port->dev_conf.rxmode.mq_mode =
1815                                         ETH_MQ_RX_NONE;
1816
1817                         port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1818                 }
1819
1820                 rxtx_port_config(port);
1821
1822                 rte_eth_macaddr_get(pid, &port->eth_addr);
1823
1824                 map_port_queue_stats_mapping_registers(pid, port);
1825 #ifdef RTE_NIC_BYPASS
1826                 rte_eth_dev_bypass_init(pid);
1827 #endif
1828         }
1829 }
1830
1831 void set_port_slave_flag(portid_t slave_pid)
1832 {
1833         struct rte_port *port;
1834
1835         port = &ports[slave_pid];
1836         port->slave_flag = 1;
1837 }
1838
1839 void clear_port_slave_flag(portid_t slave_pid)
1840 {
1841         struct rte_port *port;
1842
1843         port = &ports[slave_pid];
1844         port->slave_flag = 0;
1845 }
1846
1847 const uint16_t vlan_tags[] = {
1848                 0,  1,  2,  3,  4,  5,  6,  7,
1849                 8,  9, 10, 11,  12, 13, 14, 15,
1850                 16, 17, 18, 19, 20, 21, 22, 23,
1851                 24, 25, 26, 27, 28, 29, 30, 31
1852 };
1853
1854 static int
1855 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1856                  enum dcb_mode_enable dcb_mode,
1857                  enum rte_eth_nb_tcs num_tcs,
1858                  uint8_t pfc_en)
1859 {
1860         uint8_t i;
1861
1862         /*
1863          * Builds up the correct configuration for dcb+vt based on the vlan tags array
1864          * given above, and the number of traffic classes available for use.
1865          */
1866         if (dcb_mode == DCB_VT_ENABLED) {
1867                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1868                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
1869                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1870                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1871
1872                 /* VMDQ+DCB RX and TX configurations */
1873                 vmdq_rx_conf->enable_default_pool = 0;
1874                 vmdq_rx_conf->default_pool = 0;
1875                 vmdq_rx_conf->nb_queue_pools =
1876                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1877                 vmdq_tx_conf->nb_queue_pools =
1878                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1879
1880                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1881                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1882                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1883                         vmdq_rx_conf->pool_map[i].pools =
1884                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
1885                 }
1886                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1887                         vmdq_rx_conf->dcb_tc[i] = i;
1888                         vmdq_tx_conf->dcb_tc[i] = i;
1889                 }
1890
1891                 /* set DCB mode of RX and TX of multiple queues */
1892                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1893                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1894         } else {
1895                 struct rte_eth_dcb_rx_conf *rx_conf =
1896                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
1897                 struct rte_eth_dcb_tx_conf *tx_conf =
1898                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
1899
1900                 rx_conf->nb_tcs = num_tcs;
1901                 tx_conf->nb_tcs = num_tcs;
1902
1903                 for (i = 0; i < num_tcs; i++) {
1904                         rx_conf->dcb_tc[i] = i;
1905                         tx_conf->dcb_tc[i] = i;
1906                 }
1907                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1908                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1909                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1910         }
1911
1912         if (pfc_en)
1913                 eth_conf->dcb_capability_en =
1914                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1915         else
1916                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1917
1918         return 0;
1919 }
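/*
 * Sketch with assumed parameters: building a plain DCB (non-VT) configuration
 * with four traffic classes and priority flow control enabled, the way
 * init_port_dcb_config() below calls the helper above.  DCB_ENABLED is
 * assumed here to be the non-VT value of enum dcb_mode_enable.
 *
 *     struct rte_eth_conf conf;
 *
 *     memset(&conf, 0, sizeof(conf));
 *     get_eth_dcb_conf(&conf, DCB_ENABLED, ETH_4_TCS, 1);
 *     // conf.rxmode.mq_mode is now ETH_MQ_RX_DCB_RSS and
 *     // conf.txmode.mq_mode is ETH_MQ_TX_DCB
 */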
1920
1921 int
1922 init_port_dcb_config(portid_t pid,
1923                      enum dcb_mode_enable dcb_mode,
1924                      enum rte_eth_nb_tcs num_tcs,
1925                      uint8_t pfc_en)
1926 {
1927         struct rte_eth_conf port_conf;
1928         struct rte_eth_dev_info dev_info;
1929         struct rte_port *rte_port;
1930         int retval;
1931         uint16_t i;
1932
1933         rte_eth_dev_info_get(pid, &dev_info);
1934
1935         /* If dev_info.vmdq_pool_base is greater than 0,
1936          * the queue ids of the vmdq pools start after the pf queues.
1937          */
1938         if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
1939                 printf("VMDQ_DCB multi-queue mode is nonsensical"
1940                         " for port %d.\n", pid);
1941                 return -1;
1942         }
1943
1944         /* Assume the ports in testpmd have the same dcb capability
1945          * and have the same number of rxq and txq in dcb mode
1946          */
1947         if (dcb_mode == DCB_VT_ENABLED) {
1948                 nb_rxq = dev_info.max_rx_queues;
1949                 nb_txq = dev_info.max_tx_queues;
1950         } else {
1951                 /* if vt is disabled, use all pf queues */
1952                 if (dev_info.vmdq_pool_base == 0) {
1953                         nb_rxq = dev_info.max_rx_queues;
1954                         nb_txq = dev_info.max_tx_queues;
1955                 } else {
1956                         nb_rxq = (queueid_t)num_tcs;
1957                         nb_txq = (queueid_t)num_tcs;
1958
1959                 }
1960         }
1961         rx_free_thresh = 64;
1962
1963         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1964         /* Enter DCB configuration status */
1965         dcb_config = 1;
1966
1967         /* set configuration of DCB in vt mode and DCB in non-vt mode */
1968         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
1969         if (retval < 0)
1970                 return retval;
1971
1972         rte_port = &ports[pid];
1973         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1974
1975         rxtx_port_config(rte_port);
1976         /* VLAN filter */
1977         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1978         for (i = 0; i < RTE_DIM(vlan_tags); i++)
1979                 rx_vft_set(pid, vlan_tags[i], 1);
1980
1981         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1982         map_port_queue_stats_mapping_registers(pid, rte_port);
1983
1984         rte_port->dcb_flag = 1;
1985
1986         return 0;
1987 }
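/*
 * Sketch of the bring-up sequence around the helper above; the port id and
 * the number of traffic classes are assumptions, and this only mirrors what
 * a DCB configuration command is expected to do:
 *
 *     stop_port(0);
 *     if (init_port_dcb_config(0, DCB_ENABLED, ETH_4_TCS, 1) == 0)
 *             start_port(0);
 */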
1988
1989 static void
1990 init_port(void)
1991 {
1992         portid_t pid;
1993
1994         /* Configuration of Ethernet ports. */
1995         ports = rte_zmalloc("testpmd: ports",
1996                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
1997                             RTE_CACHE_LINE_SIZE);
1998         if (ports == NULL) {
1999                 rte_exit(EXIT_FAILURE,
2000                                 "rte_zmalloc(%d struct rte_port) failed\n",
2001                                 RTE_MAX_ETHPORTS);
2002         }
2003
2004         /* enable allocated ports */
2005         for (pid = 0; pid < nb_ports; pid++)
2006                 ports[pid].enabled = 1;
2007 }
2008
2009 static void
2010 force_quit(void)
2011 {
2012         pmd_test_exit();
2013         prompt_exit();
2014 }
2015
2016 static void
2017 signal_handler(int signum)
2018 {
2019         if (signum == SIGINT || signum == SIGTERM) {
2020                 printf("\nSignal %d received, preparing to exit...\n",
2021                                 signum);
2022                 force_quit();
2023                 /* exit with the expected status */
2024                 signal(signum, SIG_DFL);
2025                 kill(getpid(), signum);
2026         }
2027 }
2028
2029 int
2030 main(int argc, char** argv)
2031 {
2032         int  diag;
2033         uint8_t port_id;
2034
2035         signal(SIGINT, signal_handler);
2036         signal(SIGTERM, signal_handler);
2037
2038         diag = rte_eal_init(argc, argv);
2039         if (diag < 0)
2040                 rte_panic("Cannot init EAL\n");
2041
2042         nb_ports = (portid_t) rte_eth_dev_count();
2043         if (nb_ports == 0)
2044                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2045
2046         /* allocate port structures, and init them */
2047         init_port();
2048
2049         set_def_fwd_config();
2050         if (nb_lcores == 0)
2051                 rte_panic("Empty set of forwarding logical cores - check the "
2052                           "core mask supplied in the command parameters\n");
2053
2054         argc -= diag;
2055         argv += diag;
2056         if (argc > 1)
2057                 launch_args_parse(argc, argv);
2058
2059         if (!nb_rxq && !nb_txq)
2060                 printf("Warning: Either rx or tx queues should be non-zero\n");
2061
2062         if (nb_rxq > 1 && nb_rxq > nb_txq)
2063                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2064                        "but nb_txq=%d will prevent it from being fully tested.\n",
2065                        nb_rxq, nb_txq);
2066
2067         init_config();
2068         if (start_port(RTE_PORT_ALL) != 0)
2069                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2070
2071         /* set all ports to promiscuous mode by default */
2072         FOREACH_PORT(port_id, ports)
2073                 rte_eth_promiscuous_enable(port_id);
2074
2075 #ifdef RTE_LIBRTE_CMDLINE
2076         if (interactive == 1) {
2077                 if (auto_start) {
2078                         printf("Start automatic packet forwarding\n");
2079                         start_packet_forwarding(0);
2080                 }
2081                 prompt();
2082         } else
2083 #endif
2084         {
2085                 char c;
2086                 int rc;
2087
2088                 printf("No commandline core given, start packet forwarding\n");
2089                 start_packet_forwarding(0);
2090                 printf("Press enter to exit\n");
2091                 rc = read(0, &c, 1);
2092                 pmd_test_exit();
2093                 if (rc < 0)
2094                         return 1;
2095         }
2096
2097         return 0;
2098 }
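/*
 * Typical invocation, shown for illustration only (core mask, memory channel
 * count and queue numbers are assumptions):
 *
 *     ./testpmd -c 0x3 -n 4 -- -i --rxq=1 --txq=1
 *
 * With -i the interactive prompt started by prompt() is used; without it,
 * packet forwarding starts immediately and the process exits once enter is
 * pressed, as handled in the non-interactive branch of main() above.
 */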