app/testpmd: check stopping port is not in bonding
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
78 #endif
79
80 #include "testpmd.h"
81
82 uint16_t verbose_level = 0; /**< Silent by default. */
83
84 /* use master lcore for command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
87
88 /*
89  * NUMA support configuration.
90  * When set, the NUMA support attempts to dispatch the allocation of the
91  * RX and TX memory rings, and of the DMA memory buffers (mbufs), for each
92  * probed port on the CPU socket that the port is attached to.
93  * Otherwise, all memory is allocated from CPU socket 0.
94  */
95 uint8_t numa_support = 0; /**< No numa support by default */
96
97 /*
98  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99  * not configured.
100  */
101 uint8_t socket_num = UMA_NO_CONFIG;
102
103 /*
104  * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
105  */
106 uint8_t mp_anon = 0;
107
108 /*
109  * Record the Ethernet address of peer target ports to which packets are
110  * forwarded.
111  * Must be instantiated with the Ethernet addresses of the peer traffic generator
112  * ports.
113  */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
116
117 /*
118  * Probed Target Environment.
119  */
120 struct rte_port *ports;        /**< For all probed ethernet ports. */
121 portid_t nb_ports;             /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124
125 /*
126  * Test Forwarding Configuration.
127  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129  */
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
134
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
140
141 /*
142  * Forwarding engines.
143  */
144 struct fwd_engine * fwd_engines[] = {
145         &io_fwd_engine,
146         &mac_fwd_engine,
147         &mac_retry_fwd_engine,
148         &mac_swap_engine,
149         &flow_gen_engine,
150         &rx_only_engine,
151         &tx_only_engine,
152         &csum_fwd_engine,
153         &icmp_echo_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155         &ieee1588_fwd_engine,
156 #endif
157         NULL,
158 };
159
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
162
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
165                                       * specified on command-line. */
166
167 /*
168  * Configuration of packet segments used by the "txonly" processing engine.
169  */
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172         TXONLY_DEF_PACKET_LEN,
173 };
174 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
175
176 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
177 /**< Split policy for packets to TX. */
178
179 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
180 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
181
182 /* whether the current configuration is in DCB mode; 0 means it is not */
183 uint8_t dcb_config = 0;
184
185 /* Whether the DCB test mode is enabled */
186 uint8_t dcb_test = 0;
187
188 /*
189  * Configurable number of RX/TX queues.
190  */
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193
194 /*
195  * Configurable number of RX/TX ring descriptors.
196  */
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
201
202 #define RTE_PMD_PARAM_UNSET -1
203 /*
204  * Configurable values of RX and TX ring threshold registers.
205  */
206
207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
210
211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
214
215 /*
216  * Configurable value of RX free threshold.
217  */
218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
219
220 /*
221  * Configurable value of RX drop enable.
222  */
223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
224
225 /*
226  * Configurable value of TX free threshold.
227  */
228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
229
230 /*
231  * Configurable value of TX RS bit threshold.
232  */
233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
234
235 /*
236  * Configurable value of TX queue flags.
237  */
238 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
239
240 /*
241  * Receive Side Scaling (RSS) configuration.
242  */
243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
244
245 /*
246  * Port topology configuration
247  */
248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
249
250 /*
251  * Avoid flushing all the RX streams before starting forwarding.
252  */
253 uint8_t no_flush_rx = 0; /* flush by default */
254
255 /*
256  * Avoid checking the link status when starting/stopping a port.
257  */
258 uint8_t no_link_check = 0; /* check by default */
259
260 /*
261  * NIC bypass mode configuration options.
262  */
263 #ifdef RTE_NIC_BYPASS
264
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
267
268 #endif
269
270 /*
271  * Ethernet device configuration.
272  */
273 struct rte_eth_rxmode rx_mode = {
274         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
275         .split_hdr_size = 0,
276         .header_split   = 0, /**< Header Split disabled. */
277         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
280         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
282         .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
283 };
284
285 struct rte_fdir_conf fdir_conf = {
286         .mode = RTE_FDIR_MODE_NONE,
287         .pballoc = RTE_FDIR_PBALLOC_64K,
288         .status = RTE_FDIR_REPORT_STATUS,
289         .mask = {
290                 .vlan_tci_mask = 0x0,
291                 .ipv4_mask     = {
292                         .src_ip = 0xFFFFFFFF,
293                         .dst_ip = 0xFFFFFFFF,
294                 },
295                 .ipv6_mask     = {
296                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
297                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
298                 },
299                 .src_port_mask = 0xFFFF,
300                 .dst_port_mask = 0xFFFF,
301                 .mac_addr_byte_mask = 0xFF,
302                 .tunnel_type_mask = 1,
303                 .tunnel_id_mask = 0xFFFFFFFF,
304         },
305         .drop_queue = 127,
306 };
307
308 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
309
310 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
311 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
312
313 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
314 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
315
316 uint16_t nb_tx_queue_stats_mappings = 0;
317 uint16_t nb_rx_queue_stats_mappings = 0;
318
319 unsigned max_socket = 0;
320
321 /* Forward function declarations */
322 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
323 static void check_all_ports_link_status(uint32_t port_mask);
324
325 /*
326  * Check if all the ports are started.
327  * If yes, return positive value. If not, return zero.
328  */
329 static int all_ports_started(void);
330
331 /*
332  * Find next enabled port
333  */
334 portid_t
335 find_next_port(portid_t p, struct rte_port *ports, int size)
336 {
337         if (ports == NULL)
338                 rte_exit(-EINVAL, "failed to find a next port id\n");
339
340         while ((p < size) && (ports[p].enabled == 0))
341                 p++;
342         return p;
343 }
344
345 /*
346  * Setup default configuration.
347  */
348 static void
349 set_default_fwd_lcores_config(void)
350 {
351         unsigned int i;
352         unsigned int nb_lc;
353         unsigned int sock_num;
354
355         nb_lc = 0;
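        /*
         * Walk all lcore ids: remember the highest CPU socket seen
         * (max_socket) and collect every enabled lcore, except the
         * master, as a candidate forwarding lcore.
         */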
356         for (i = 0; i < RTE_MAX_LCORE; i++) {
357                 sock_num = rte_lcore_to_socket_id(i) + 1;
358                 if (sock_num > max_socket) {
359                         if (sock_num > RTE_MAX_NUMA_NODES)
360                                 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
361                         max_socket = sock_num;
362                 }
363                 if (!rte_lcore_is_enabled(i))
364                         continue;
365                 if (i == rte_get_master_lcore())
366                         continue;
367                 fwd_lcores_cpuids[nb_lc++] = i;
368         }
369         nb_lcores = (lcoreid_t) nb_lc;
370         nb_cfg_lcores = nb_lcores;
371         nb_fwd_lcores = 1;
372 }
373
374 static void
375 set_def_peer_eth_addrs(void)
376 {
377         portid_t i;
378
379         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
380                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
381                 peer_eth_addrs[i].addr_bytes[5] = i;
382         }
383 }
384
385 static void
386 set_default_fwd_ports_config(void)
387 {
388         portid_t pt_id;
389
390         for (pt_id = 0; pt_id < nb_ports; pt_id++)
391                 fwd_ports_ids[pt_id] = pt_id;
392
393         nb_cfg_ports = nb_ports;
394         nb_fwd_ports = nb_ports;
395 }
396
397 void
398 set_def_fwd_config(void)
399 {
400         set_default_fwd_lcores_config();
401         set_def_peer_eth_addrs();
402         set_default_fwd_ports_config();
403 }
404
405 /*
406  * Configuration initialisation done once at init time.
407  */
408 static void
409 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
410                  unsigned int socket_id)
411 {
412         char pool_name[RTE_MEMPOOL_NAMESIZE];
413         struct rte_mempool *rte_mp = NULL;
414         uint32_t mb_size;
415
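        /* Each mempool element holds the mbuf header plus the data room. */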
416         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
417         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
418
419         RTE_LOG(INFO, USER1,
420                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
421                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
422
423 #ifdef RTE_LIBRTE_PMD_XENVIRT
424         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
425                 (unsigned) mb_mempool_cache,
426                 sizeof(struct rte_pktmbuf_pool_private),
427                 rte_pktmbuf_pool_init, NULL,
428                 rte_pktmbuf_init, NULL,
429                 socket_id, 0);
430 #endif
431
432         /* if the former XEN allocation failed, fall back to normal allocation */
433         if (rte_mp == NULL) {
434                 if (mp_anon != 0) {
435                         rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
436                                 mb_size, (unsigned) mb_mempool_cache,
437                                 sizeof(struct rte_pktmbuf_pool_private),
438                                 socket_id, 0);
439
440                         if (rte_mp == NULL || rte_mempool_populate_anon(rte_mp) == 0) {
441                                 rte_mempool_free(rte_mp);
442                                 rte_mp = NULL;
443                         }
444                         if (rte_mp != NULL) rte_pktmbuf_pool_init(rte_mp, NULL);
445                         if (rte_mp != NULL) rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
446                 } else {
447                         /* wrapper to rte_mempool_create() */
448                         rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
449                                 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
450                 }
451         }
452
453         if (rte_mp == NULL) {
454                 rte_exit(EXIT_FAILURE,
455                         "Creation of mbuf pool for socket %u failed: %s\n",
456                         socket_id, rte_strerror(rte_errno));
457         } else if (verbose_level > 0) {
458                 rte_mempool_dump(stdout, rte_mp);
459         }
460 }
461
462 /*
463  * Check whether the given socket id is valid in NUMA mode;
464  * return 0 if valid, -1 otherwise.
465  */
466 static int
467 check_socket_id(const unsigned int socket_id)
468 {
469         static int warning_once = 0;
470
471         if (socket_id >= max_socket) {
472                 if (!warning_once && numa_support)
473                         printf("Warning: NUMA should be configured manually by"
474                                " using --port-numa-config and"
475                                " --ring-numa-config parameters along with"
476                                " --numa.\n");
477                 warning_once = 1;
478                 return -1;
479         }
480         return 0;
481 }
482
483 static void
484 init_config(void)
485 {
486         portid_t pid;
487         struct rte_port *port;
488         struct rte_mempool *mbp;
489         unsigned int nb_mbuf_per_pool;
490         lcoreid_t  lc_id;
491         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
492
493         memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
494         /* Configuration of logical cores. */
495         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
496                                 sizeof(struct fwd_lcore *) * nb_lcores,
497                                 RTE_CACHE_LINE_SIZE);
498         if (fwd_lcores == NULL) {
499                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
500                                                         "failed\n", nb_lcores);
501         }
502         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
503                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
504                                                sizeof(struct fwd_lcore),
505                                                RTE_CACHE_LINE_SIZE);
506                 if (fwd_lcores[lc_id] == NULL) {
507                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
508                                                                 "failed\n");
509                 }
510                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
511         }
512
513         /*
514          * Create mbuf pools.
515          * If NUMA support is disabled, create a single mbuf pool in
516          * socket 0 memory by default.
517          * Otherwise, create an mbuf pool on each detected CPU socket.
518          *
519          * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd
520          * and nb_txd can be reconfigured at run time.
521          */
522         if (param_total_num_mbufs)
523                 nb_mbuf_per_pool = param_total_num_mbufs;
524         else {
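                /*
                 * Default pool sizing: worst-case RX/TX ring occupancy,
                 * plus one mempool cache per lcore, plus one full burst.
                 * In non-NUMA mode the single pool must also cover every
                 * possible port.
                 */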
525                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
526                                 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
527
528                 if (!numa_support)
529                         nb_mbuf_per_pool =
530                                 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
531         }
532
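        /* UMA mode: a single pool, on socket 0 unless --socket-num is given. */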
533         if (!numa_support) {
534                 if (socket_num == UMA_NO_CONFIG)
535                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
536                 else
537                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
538                                                  socket_num);
539         }
540
541         FOREACH_PORT(pid, ports) {
542                 port = &ports[pid];
543                 rte_eth_dev_info_get(pid, &port->dev_info);
544
545                 if (numa_support) {
546                         if (port_numa[pid] != NUMA_NO_CONFIG)
547                                 port_per_socket[port_numa[pid]]++;
548                         else {
549                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
550
551                                 /* if socket_id is invalid, set to 0 */
552                                 if (check_socket_id(socket_id) < 0)
553                                         socket_id = 0;
554                                 port_per_socket[socket_id]++;
555                         }
556                 }
557
558                 /* set flag to initialize port/queue */
559                 port->need_reconfig = 1;
560                 port->need_reconfig_queues = 1;
561         }
562
563         if (numa_support) {
564                 uint8_t i;
565                 unsigned int nb_mbuf;
566
567                 if (param_total_num_mbufs)
568                         nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
569
570                 for (i = 0; i < max_socket; i++) {
571                         nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
572                         if (nb_mbuf)
573                                 mbuf_pool_create(mbuf_data_size,
574                                                 nb_mbuf,i);
575                 }
576         }
577         init_port_config();
578
579         /*
580          * Record which mbuf pool each logical core should use, if needed.
581          */
582         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
583                 mbp = mbuf_pool_find(
584                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
585
586                 if (mbp == NULL)
587                         mbp = mbuf_pool_find(0);
588                 fwd_lcores[lc_id]->mbp = mbp;
589         }
590
591         /* Configuration of packet forwarding streams. */
592         if (init_fwd_streams() < 0)
593                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
594 }
595
596
597 void
598 reconfig(portid_t new_port_id, unsigned socket_id)
599 {
600         struct rte_port *port;
601
602         /* Reconfiguration of Ethernet ports. */
603         port = &ports[new_port_id];
604         rte_eth_dev_info_get(new_port_id, &port->dev_info);
605
606         /* set flag to initialize port/queue */
607         port->need_reconfig = 1;
608         port->need_reconfig_queues = 1;
609         port->socket_id = socket_id;
610
611         init_port_config();
612 }
613
614
615 int
616 init_fwd_streams(void)
617 {
618         portid_t pid;
619         struct rte_port *port;
620         streamid_t sm_id, nb_fwd_streams_new;
621         queueid_t q;
622
623         /* set each port's socket id according to NUMA mode, if enabled */
624         FOREACH_PORT(pid, ports) {
625                 port = &ports[pid];
626                 if (nb_rxq > port->dev_info.max_rx_queues) {
627                         printf("Fail: nb_rxq(%d) is greater than "
628                                 "max_rx_queues(%d)\n", nb_rxq,
629                                 port->dev_info.max_rx_queues);
630                         return -1;
631                 }
632                 if (nb_txq > port->dev_info.max_tx_queues) {
633                         printf("Fail: nb_txq(%d) is greater than "
634                                 "max_tx_queues(%d)\n", nb_txq,
635                                 port->dev_info.max_tx_queues);
636                         return -1;
637                 }
638                 if (numa_support) {
639                         if (port_numa[pid] != NUMA_NO_CONFIG)
640                                 port->socket_id = port_numa[pid];
641                         else {
642                                 port->socket_id = rte_eth_dev_socket_id(pid);
643
644                                 /* if socket_id is invalid, set to 0 */
645                                 if (check_socket_id(port->socket_id) < 0)
646                                         port->socket_id = 0;
647                         }
648                 }
649                 else {
650                         if (socket_num == UMA_NO_CONFIG)
651                                 port->socket_id = 0;
652                         else
653                                 port->socket_id = socket_num;
654                 }
655         }
656
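        /*
         * One forwarding stream per (port, queue) pair, using the larger
         * of the RX and TX queue counts.
         */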
657         q = RTE_MAX(nb_rxq, nb_txq);
658         if (q == 0) {
659                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
660                 return -1;
661         }
662         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
663         if (nb_fwd_streams_new == nb_fwd_streams)
664                 return 0;
665         /* clear the old */
666         if (fwd_streams != NULL) {
667                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
668                         if (fwd_streams[sm_id] == NULL)
669                                 continue;
670                         rte_free(fwd_streams[sm_id]);
671                         fwd_streams[sm_id] = NULL;
672                 }
673                 rte_free(fwd_streams);
674                 fwd_streams = NULL;
675         }
676
677         /* init new */
678         nb_fwd_streams = nb_fwd_streams_new;
679         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
680                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
681         if (fwd_streams == NULL)
682                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
683                                                 "failed\n", nb_fwd_streams);
684
685         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
686                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
687                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
688                 if (fwd_streams[sm_id] == NULL)
689                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
690                                                                 " failed\n");
691         }
692
693         return 0;
694 }
695
696 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
697 static void
698 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
699 {
700         unsigned int total_burst;
701         unsigned int nb_burst;
702         unsigned int burst_stats[3];
703         uint16_t pktnb_stats[3];
704         uint16_t nb_pkt;
705         int burst_percent[3];
706
707         /*
708          * First compute the total number of packet bursts and the
709          * two highest numbers of bursts of the same number of packets.
710          */
711         total_burst = 0;
712         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
713         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
714         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
715                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
716                 if (nb_burst == 0)
717                         continue;
718                 total_burst += nb_burst;
719                 if (nb_burst > burst_stats[0]) {
720                         burst_stats[1] = burst_stats[0];
721                         pktnb_stats[1] = pktnb_stats[0];
722                         burst_stats[0] = nb_burst;
723                         pktnb_stats[0] = nb_pkt;
724                 }
725         }
726         if (total_burst == 0)
727                 return;
728         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
729         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
730                burst_percent[0], (int) pktnb_stats[0]);
731         if (burst_stats[0] == total_burst) {
732                 printf("]\n");
733                 return;
734         }
735         if (burst_stats[0] + burst_stats[1] == total_burst) {
736                 printf(" + %d%% of %d pkts]\n",
737                        100 - burst_percent[0], pktnb_stats[1]);
738                 return;
739         }
740         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
741         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
742         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
743                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
744                 return;
745         }
746         printf(" + %d%% of %d pkts + %d%% of others]\n",
747                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
748 }
749 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
750
751 static void
752 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
753 {
754         struct rte_port *port;
755         uint8_t i;
756
757         static const char *fwd_stats_border = "----------------------";
758
759         port = &ports[port_id];
760         printf("\n  %s Forward statistics for port %-2d %s\n",
761                fwd_stats_border, port_id, fwd_stats_border);
762
763         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
764                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
765                        "%-"PRIu64"\n",
766                        stats->ipackets, stats->imissed,
767                        (uint64_t) (stats->ipackets + stats->imissed));
768
769                 if (cur_fwd_eng == &csum_fwd_engine)
770                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
771                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
772                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
773                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
774                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
775                 }
776
777                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
778                        "%-"PRIu64"\n",
779                        stats->opackets, port->tx_dropped,
780                        (uint64_t) (stats->opackets + port->tx_dropped));
781         }
782         else {
783                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
784                        "%14"PRIu64"\n",
785                        stats->ipackets, stats->imissed,
786                        (uint64_t) (stats->ipackets + stats->imissed));
787
788                 if (cur_fwd_eng == &csum_fwd_engine)
789                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
790                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
791                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
792                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
793                         printf("  RX-nombufs:             %14"PRIu64"\n",
794                                stats->rx_nombuf);
795                 }
796
797                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
798                        "%14"PRIu64"\n",
799                        stats->opackets, port->tx_dropped,
800                        (uint64_t) (stats->opackets + port->tx_dropped));
801         }
802
803 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
804         if (port->rx_stream)
805                 pkt_burst_stats_display("RX",
806                         &port->rx_stream->rx_burst_stats);
807         if (port->tx_stream)
808                 pkt_burst_stats_display("TX",
809                         &port->tx_stream->tx_burst_stats);
810 #endif
811
812         if (port->rx_queue_stats_mapping_enabled) {
813                 printf("\n");
814                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
815                         printf("  Stats reg %2d RX-packets:%14"PRIu64
816                                "     RX-errors:%14"PRIu64
817                                "    RX-bytes:%14"PRIu64"\n",
818                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
819                 }
820                 printf("\n");
821         }
822         if (port->tx_queue_stats_mapping_enabled) {
823                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
824                         printf("  Stats reg %2d TX-packets:%14"PRIu64
825                                "                                 TX-bytes:%14"PRIu64"\n",
826                                i, stats->q_opackets[i], stats->q_obytes[i]);
827                 }
828         }
829
830         printf("  %s--------------------------------%s\n",
831                fwd_stats_border, fwd_stats_border);
832 }
833
834 static void
835 fwd_stream_stats_display(streamid_t stream_id)
836 {
837         struct fwd_stream *fs;
838         static const char *fwd_top_stats_border = "-------";
839
840         fs = fwd_streams[stream_id];
841         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
842             (fs->fwd_dropped == 0))
843                 return;
844         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
845                "TX Port=%2d/Queue=%2d %s\n",
846                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
847                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
848         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
849                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
850
851         /* if checksum mode */
852         if (cur_fwd_eng == &csum_fwd_engine) {
853                printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
854                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
855         }
856
857 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
858         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
859         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
860 #endif
861 }
862
863 static void
864 flush_fwd_rx_queues(void)
865 {
866         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
867         portid_t  rxp;
868         portid_t port_id;
869         queueid_t rxq;
870         uint16_t  nb_rx;
871         uint16_t  i;
872         uint8_t   j;
873
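        /*
         * Drain every RX queue of every forwarding port, twice with a
         * short pause in between, so that stale packets do not pollute
         * the statistics of the next forwarding run.
         */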
874         for (j = 0; j < 2; j++) {
875                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
876                         for (rxq = 0; rxq < nb_rxq; rxq++) {
877                                 port_id = fwd_ports_ids[rxp];
878                                 do {
879                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
880                                                 pkts_burst, MAX_PKT_BURST);
881                                         for (i = 0; i < nb_rx; i++)
882                                                 rte_pktmbuf_free(pkts_burst[i]);
883                                 } while (nb_rx > 0);
884                         }
885                 }
886                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
887         }
888 }
889
890 static void
891 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
892 {
893         struct fwd_stream **fsm;
894         streamid_t nb_fs;
895         streamid_t sm_id;
896
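        /*
         * Each lcore owns a contiguous slice of fwd_streams and loops
         * over it until it is asked to stop.
         */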
897         fsm = &fwd_streams[fc->stream_idx];
898         nb_fs = fc->stream_nb;
899         do {
900                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
901                         (*pkt_fwd)(fsm[sm_id]);
902         } while (! fc->stopped);
903 }
904
905 static int
906 start_pkt_forward_on_core(void *fwd_arg)
907 {
908         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
909                              cur_fwd_config.fwd_eng->packet_fwd);
910         return 0;
911 }
912
913 /*
914  * Run the TXONLY packet forwarding engine to send a single burst of packets.
915  * Used to start communication flows in network loopback test configurations.
916  */
917 static int
918 run_one_txonly_burst_on_core(void *fwd_arg)
919 {
920         struct fwd_lcore *fwd_lc;
921         struct fwd_lcore tmp_lcore;
922
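        /*
         * Work on a copy of the lcore context with the stopped flag set,
         * so that run_pkt_fwd_on_lcore() makes exactly one pass over the
         * streams, i.e. sends a single TX-only burst per stream.
         */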
923         fwd_lc = (struct fwd_lcore *) fwd_arg;
924         tmp_lcore = *fwd_lc;
925         tmp_lcore.stopped = 1;
926         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
927         return 0;
928 }
929
930 /*
931  * Launch packet forwarding:
932  *     - Setup per-port forwarding context.
933  *     - Launch logical cores with their forwarding configuration.
934  */
935 static void
936 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
937 {
938         port_fwd_begin_t port_fwd_begin;
939         unsigned int i;
940         unsigned int lc_id;
941         int diag;
942
943         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
944         if (port_fwd_begin != NULL) {
945                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
946                         (*port_fwd_begin)(fwd_ports_ids[i]);
947         }
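        /*
         * Launch the forwarding loop on every forwarding lcore; in
         * interactive mode, skip the lcore running the command line.
         */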
948         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
949                 lc_id = fwd_lcores_cpuids[i];
950                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
951                         fwd_lcores[i]->stopped = 0;
952                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
953                                                      fwd_lcores[i], lc_id);
954                         if (diag != 0)
955                                 printf("launch lcore %u failed - diag=%d\n",
956                                        lc_id, diag);
957                 }
958         }
959 }
960
961 /*
962  * Launch packet forwarding configuration.
963  */
964 void
965 start_packet_forwarding(int with_tx_first)
966 {
967         port_fwd_begin_t port_fwd_begin;
968         port_fwd_end_t  port_fwd_end;
969         struct rte_port *port;
970         unsigned int i;
971         portid_t   pt_id;
972         streamid_t sm_id;
973
974         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
975                 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
976
977         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
978                 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
979
980         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
981                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
982                 (!nb_rxq || !nb_txq))
983                 rte_exit(EXIT_FAILURE,
984                         "Either rxq or txq is 0, cannot use %s fwd mode\n",
985                         cur_fwd_eng->fwd_mode_name);
986
987         if (all_ports_started() == 0) {
988                 printf("Not all ports were started\n");
989                 return;
990         }
991         if (test_done == 0) {
992                 printf("Packet forwarding already started\n");
993                 return;
994         }
995         if(dcb_test) {
996                 for (i = 0; i < nb_fwd_ports; i++) {
997                         pt_id = fwd_ports_ids[i];
998                         port = &ports[pt_id];
999                         if (!port->dcb_flag) {
1000                                 printf("In DCB mode, all forwarding ports must "
1001                                        "be configured in this mode.\n");
1002                                 return;
1003                         }
1004                 }
1005                 if (nb_fwd_lcores == 1) {
1006                         printf("In DCB mode, the number of forwarding cores "
1007                                "should be larger than 1.\n");
1008                         return;
1009                 }
1010         }
1011         test_done = 0;
1012
1013         if(!no_flush_rx)
1014                 flush_fwd_rx_queues();
1015
1016         fwd_config_setup();
1017         rxtx_config_display();
1018
1019         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1020                 pt_id = fwd_ports_ids[i];
1021                 port = &ports[pt_id];
1022                 rte_eth_stats_get(pt_id, &port->stats);
1023                 port->tx_dropped = 0;
1024
1025                 map_port_queue_stats_mapping_registers(pt_id, port);
1026         }
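        /*
         * Reset the per-stream counters so that the next "stop" reports
         * only the traffic of this run.
         */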
1027         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1028                 fwd_streams[sm_id]->rx_packets = 0;
1029                 fwd_streams[sm_id]->tx_packets = 0;
1030                 fwd_streams[sm_id]->fwd_dropped = 0;
1031                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1032                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1033
1034 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1035                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1036                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1037                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1038                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1039 #endif
1040 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1041                 fwd_streams[sm_id]->core_cycles = 0;
1042 #endif
1043         }
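        /*
         * When TX-first is requested (e.g. testpmd's "start tx_first"
         * command), every forwarding port first sends a single TX-only
         * burst to prime the traffic in loopback topologies, before the
         * regular forwarding engine is launched.
         */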
1044         if (with_tx_first) {
1045                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1046                 if (port_fwd_begin != NULL) {
1047                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1048                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1049                 }
1050                 launch_packet_forwarding(run_one_txonly_burst_on_core);
1051                 rte_eal_mp_wait_lcore();
1052                 port_fwd_end = tx_only_engine.port_fwd_end;
1053                 if (port_fwd_end != NULL) {
1054                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1055                                 (*port_fwd_end)(fwd_ports_ids[i]);
1056                 }
1057         }
1058         launch_packet_forwarding(start_pkt_forward_on_core);
1059 }
1060
1061 void
1062 stop_packet_forwarding(void)
1063 {
1064         struct rte_eth_stats stats;
1065         struct rte_port *port;
1066         port_fwd_end_t  port_fwd_end;
1067         int i;
1068         portid_t   pt_id;
1069         streamid_t sm_id;
1070         lcoreid_t  lc_id;
1071         uint64_t total_recv;
1072         uint64_t total_xmit;
1073         uint64_t total_rx_dropped;
1074         uint64_t total_tx_dropped;
1075         uint64_t total_rx_nombuf;
1076         uint64_t tx_dropped;
1077         uint64_t rx_bad_ip_csum;
1078         uint64_t rx_bad_l4_csum;
1079 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1080         uint64_t fwd_cycles;
1081 #endif
1082         static const char *acc_stats_border = "+++++++++++++++";
1083
1084         if (all_ports_started() == 0) {
1085                 printf("Not all ports were started\n");
1086                 return;
1087         }
1088         if (test_done) {
1089                 printf("Packet forwarding not started\n");
1090                 return;
1091         }
1092         printf("Telling cores to stop...");
1093         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1094                 fwd_lcores[lc_id]->stopped = 1;
1095         printf("\nWaiting for lcores to finish...\n");
1096         rte_eal_mp_wait_lcore();
1097         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1098         if (port_fwd_end != NULL) {
1099                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1100                         pt_id = fwd_ports_ids[i];
1101                         (*port_fwd_end)(pt_id);
1102                 }
1103         }
1104 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1105         fwd_cycles = 0;
1106 #endif
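        /*
         * Fold the per-stream drop and bad-checksum counters back into
         * their RX/TX ports; per-stream statistics are displayed only
         * when there are more streams than forwarding ports.
         */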
1107         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1108                 if (cur_fwd_config.nb_fwd_streams >
1109                     cur_fwd_config.nb_fwd_ports) {
1110                         fwd_stream_stats_display(sm_id);
1111                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1112                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1113                 } else {
1114                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1115                                 fwd_streams[sm_id];
1116                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1117                                 fwd_streams[sm_id];
1118                 }
1119                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1120                 tx_dropped = (uint64_t) (tx_dropped +
1121                                          fwd_streams[sm_id]->fwd_dropped);
1122                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1123
1124                 rx_bad_ip_csum =
1125                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1126                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1127                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1128                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1129                                                         rx_bad_ip_csum;
1130
1131                 rx_bad_l4_csum =
1132                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1133                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1134                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1135                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1136                                                         rx_bad_l4_csum;
1137
1138 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1139                 fwd_cycles = (uint64_t) (fwd_cycles +
1140                                          fwd_streams[sm_id]->core_cycles);
1141 #endif
1142         }
1143         total_recv = 0;
1144         total_xmit = 0;
1145         total_rx_dropped = 0;
1146         total_tx_dropped = 0;
1147         total_rx_nombuf  = 0;
1148         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1149                 pt_id = fwd_ports_ids[i];
1150
1151                 port = &ports[pt_id];
1152                 rte_eth_stats_get(pt_id, &stats);
1153                 stats.ipackets -= port->stats.ipackets;
1154                 port->stats.ipackets = 0;
1155                 stats.opackets -= port->stats.opackets;
1156                 port->stats.opackets = 0;
1157                 stats.ibytes   -= port->stats.ibytes;
1158                 port->stats.ibytes = 0;
1159                 stats.obytes   -= port->stats.obytes;
1160                 port->stats.obytes = 0;
1161                 stats.imissed  -= port->stats.imissed;
1162                 port->stats.imissed = 0;
1163                 stats.oerrors  -= port->stats.oerrors;
1164                 port->stats.oerrors = 0;
1165                 stats.rx_nombuf -= port->stats.rx_nombuf;
1166                 port->stats.rx_nombuf = 0;
1167
1168                 total_recv += stats.ipackets;
1169                 total_xmit += stats.opackets;
1170                 total_rx_dropped += stats.imissed;
1171                 total_tx_dropped += port->tx_dropped;
1172                 total_rx_nombuf  += stats.rx_nombuf;
1173
1174                 fwd_port_stats_display(pt_id, &stats);
1175         }
1176         printf("\n  %s Accumulated forward statistics for all ports"
1177                "%s\n",
1178                acc_stats_border, acc_stats_border);
1179         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1180                "%-"PRIu64"\n"
1181                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1182                "%-"PRIu64"\n",
1183                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1184                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1185         if (total_rx_nombuf > 0)
1186                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1187         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1188                "%s\n",
1189                acc_stats_border, acc_stats_border);
1190 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1191         if (total_recv > 0)
1192                 printf("\n  CPU cycles/packet=%u (total cycles="
1193                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1194                        (unsigned int)(fwd_cycles / total_recv),
1195                        fwd_cycles, total_recv);
1196 #endif
1197         printf("\nDone.\n");
1198         test_done = 1;
1199 }
1200
1201 void
1202 dev_set_link_up(portid_t pid)
1203 {
1204         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1205                 printf("\nSet link up failed.\n");
1206 }
1207
1208 void
1209 dev_set_link_down(portid_t pid)
1210 {
1211         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1212                 printf("\nSet link down failed.\n");
1213 }
1214
1215 static int
1216 all_ports_started(void)
1217 {
1218         portid_t pi;
1219         struct rte_port *port;
1220
1221         FOREACH_PORT(pi, ports) {
1222                 port = &ports[pi];
1223                 /* Check if there is a port which is not started */
1224                 if ((port->port_status != RTE_PORT_STARTED) &&
1225                         (port->slave_flag == 0))
1226                         return 0;
1227         }
1228
1229         /* All ports (except bonding slaves) are started */
1230         return 1;
1231 }
1232
1233 int
1234 all_ports_stopped(void)
1235 {
1236         portid_t pi;
1237         struct rte_port *port;
1238
1239         FOREACH_PORT(pi, ports) {
1240                 port = &ports[pi];
1241                 if ((port->port_status != RTE_PORT_STOPPED) &&
1242                         (port->slave_flag == 0))
1243                         return 0;
1244         }
1245
1246         return 1;
1247 }
1248
1249 int
1250 port_is_started(portid_t port_id)
1251 {
1252         if (port_id_is_invalid(port_id, ENABLED_WARN))
1253                 return 0;
1254
1255         if (ports[port_id].port_status != RTE_PORT_STARTED)
1256                 return 0;
1257
1258         return 1;
1259 }
1260
1261 static int
1262 port_is_closed(portid_t port_id)
1263 {
1264         if (port_id_is_invalid(port_id, ENABLED_WARN))
1265                 return 0;
1266
1267         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1268                 return 0;
1269
1270         return 1;
1271 }
1272
1273 int
1274 start_port(portid_t pid)
1275 {
1276         int diag, need_check_link_status = -1;
1277         portid_t pi;
1278         queueid_t qi;
1279         struct rte_port *port;
1280         struct ether_addr mac_addr;
1281
1282         if (port_id_is_invalid(pid, ENABLED_WARN))
1283                 return 0;
1284
1285         if (init_fwd_streams() < 0) {
1286                 printf("Fail from init_fwd_streams()\n");
1287                 return -1;
1288         }
1289
1290         if(dcb_config)
1291                 dcb_test = 1;
1292         FOREACH_PORT(pi, ports) {
1293                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1294                         continue;
1295
1296                 need_check_link_status = 0;
1297                 port = &ports[pi];
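                /*
                 * Atomically move the port from STOPPED to HANDLING;
                 * ports in any other state are skipped.
                 */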
1298                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1299                                                  RTE_PORT_HANDLING) == 0) {
1300                         printf("Port %d is not stopped\n", pi);
1301                         continue;
1302                 }
1303
1304                 if (port->need_reconfig > 0) {
1305                         port->need_reconfig = 0;
1306
1307                         printf("Configuring Port %d (socket %u)\n", pi,
1308                                         port->socket_id);
1309                         /* configure port */
1310                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1311                                                 &(port->dev_conf));
1312                         if (diag != 0) {
1313                                 if (rte_atomic16_cmpset(&(port->port_status),
1314                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1315                                         printf("Port %d can not be set back "
1316                                                         "to stopped\n", pi);
1317                                 printf("Fail to configure port %d\n", pi);
1318                                 /* try to reconfigure port next time */
1319                                 port->need_reconfig = 1;
1320                                 return -1;
1321                         }
1322                 }
1323                 if (port->need_reconfig_queues > 0) {
1324                         port->need_reconfig_queues = 0;
1325                         /* setup tx queues */
1326                         for (qi = 0; qi < nb_txq; qi++) {
1327                                 if ((numa_support) &&
1328                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1329                                         diag = rte_eth_tx_queue_setup(pi, qi,
1330                                                 nb_txd,txring_numa[pi],
1331                                                 &(port->tx_conf));
1332                                 else
1333                                         diag = rte_eth_tx_queue_setup(pi, qi,
1334                                                 nb_txd,port->socket_id,
1335                                                 &(port->tx_conf));
1336
1337                                 if (diag == 0)
1338                                         continue;
1339
1340                                 /* Fail to setup tx queue, return */
1341                                 if (rte_atomic16_cmpset(&(port->port_status),
1342                                                         RTE_PORT_HANDLING,
1343                                                         RTE_PORT_STOPPED) == 0)
1344                                         printf("Port %d can not be set back "
1345                                                         "to stopped\n", pi);
1346                                 printf("Fail to configure port %d tx queues\n", pi);
1347                                 /* try to reconfigure queues next time */
1348                                 port->need_reconfig_queues = 1;
1349                                 return -1;
1350                         }
1351                         /* setup rx queues */
1352                         for (qi = 0; qi < nb_rxq; qi++) {
1353                                 if ((numa_support) &&
1354                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1355                                         struct rte_mempool * mp =
1356                                                 mbuf_pool_find(rxring_numa[pi]);
1357                                         if (mp == NULL) {
1358                                                 printf("Failed to setup RX queue:"
1359                                                         " No mempool allocation"
1360                                                         " on socket %d\n",
1361                                                         rxring_numa[pi]);
1362                                                 return -1;
1363                                         }
1364
1365                                         diag = rte_eth_rx_queue_setup(pi, qi,
1366                                              nb_rxd,rxring_numa[pi],
1367                                              &(port->rx_conf),mp);
1368                                 }
1369                                 else
1370                                         diag = rte_eth_rx_queue_setup(pi, qi,
1371                                              nb_rxd,port->socket_id,
1372                                              &(port->rx_conf),
1373                                              mbuf_pool_find(port->socket_id));
1374
1375                                 if (diag == 0)
1376                                         continue;
1377
1378
1379                                 /* Fail to setup rx queue, return */
1380                                 if (rte_atomic16_cmpset(&(port->port_status),
1381                                                         RTE_PORT_HANDLING,
1382                                                         RTE_PORT_STOPPED) == 0)
1383                                         printf("Port %d can not be set back "
1384                                                         "to stopped\n", pi);
1385                                 printf("Fail to configure port %d rx queues\n", pi);
1386                                 /* try to reconfigure queues next time */
1387                                 port->need_reconfig_queues = 1;
1388                                 return -1;
1389                         }
1390                 }
1391                 /* start port */
1392                 if (rte_eth_dev_start(pi) < 0) {
1393                         printf("Failed to start port %d\n", pi);
1394
1395                         /* Start failed: move the port back to the stopped state */
1396                         if (rte_atomic16_cmpset(&(port->port_status),
1397                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1398                                 printf("Port %d cannot be set back to "
1399                                                         "stopped\n", pi);
1400                         continue;
1401                 }
1402
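                /* Device start succeeded: move the port from HANDLING to STARTED. */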
1403                 if (rte_atomic16_cmpset(&(port->port_status),
1404                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1405                         printf("Port %d cannot be set to started\n", pi);
1406
1407                 rte_eth_macaddr_get(pi, &mac_addr);
1408                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1409                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1410                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1411                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1412
1413                 /* at least one port started, need checking link status */
1414                 need_check_link_status = 1;
1415         }
1416
1417         if (need_check_link_status == 1 && !no_link_check)
1418                 check_all_ports_link_status(RTE_PORT_ALL);
1419         else if (need_check_link_status == 0)
1420                 printf("Please stop the ports first\n");
1421
1422         printf("Done\n");
1423         return 0;
1424 }
1425
1426 void
1427 stop_port(portid_t pid)
1428 {
1429         portid_t pi;
1430         struct rte_port *port;
1431         int need_check_link_status = 0;
1432
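        /* Stopping ports ends any DCB test run, so drop the DCB test flags. */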
1433         if (dcb_test) {
1434                 dcb_test = 0;
1435                 dcb_config = 0;
1436         }
1437
1438         if (port_id_is_invalid(pid, ENABLED_WARN))
1439                 return;
1440
1441         printf("Stopping ports...\n");
1442
1443         FOREACH_PORT(pi, ports) {
1444                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1445                         continue;
1446
1447                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1448                         printf("Please remove port %d from forwarding configuration.\n", pi);
1449                         continue;
1450                 }
1451
1452                 if (port_is_bonding_slave(pi)) {
1453                         printf("Please remove port %d from bonded device.\n", pi);
1454                         continue;
1455                 }
1456
1457                 port = &ports[pi];
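                /* Only a port currently in the STARTED state can be stopped;
                 * claim it by moving it to HANDLING first.
                 */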
1458                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1459                                                 RTE_PORT_HANDLING) == 0)
1460                         continue;
1461
1462                 rte_eth_dev_stop(pi);
1463
1464                 if (rte_atomic16_cmpset(&(port->port_status),
1465                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1466                         printf("Port %d cannot be set to stopped\n", pi);
1467                 need_check_link_status = 1;
1468         }
1469         if (need_check_link_status && !no_link_check)
1470                 check_all_ports_link_status(RTE_PORT_ALL);
1471
1472         printf("Done\n");
1473 }
1474
1475 void
1476 close_port(portid_t pid)
1477 {
1478         portid_t pi;
1479         struct rte_port *port;
1480
1481         if (port_id_is_invalid(pid, ENABLED_WARN))
1482                 return;
1483
1484         printf("Closing ports...\n");
1485
1486         FOREACH_PORT(pi, ports) {
1487                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1488                         continue;
1489
1490                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1491                         printf("Please remove port %d from forwarding configuration.\n", pi);
1492                         continue;
1493                 }
1494
1495                 if (port_is_bonding_slave(pi)) {
1496                         printf("Please remove port %d from bonded device.\n", pi);
1497                         continue;
1498                 }
1499
1500                 port = &ports[pi];
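                /* A compare-and-set from CLOSED to CLOSED succeeds only if the
                 * port is already closed; otherwise the port must be STOPPED
                 * before it can be claimed for closing.
                 */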
1501                 if (rte_atomic16_cmpset(&(port->port_status),
1502                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1503                         printf("Port %d is already closed\n", pi);
1504                         continue;
1505                 }
1506
1507                 if (rte_atomic16_cmpset(&(port->port_status),
1508                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1509                         printf("Port %d is not stopped\n", pi);
1510                         continue;
1511                 }
1512
1513                 rte_eth_dev_close(pi);
1514
1515                 if (rte_atomic16_cmpset(&(port->port_status),
1516                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1517                         printf("Port %d cannot be set to closed\n", pi);
1518         }
1519
1520         printf("Done\n");
1521 }
1522
1523 void
1524 attach_port(char *identifier)
1525 {
1526         portid_t pi = 0;
1527
1528         printf("Attaching a new port...\n");
1529
1530         if (identifier == NULL) {
1531                 printf("Invalid parameter specified\n");
1532                 return;
1533         }
1534
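        /* Attach the device named by the identifier (PCI address or vdev name). */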
1535         if (rte_eth_dev_attach(identifier, &pi))
1536                 return;
1537
1538         ports[pi].enabled = 1;
1539         reconfig(pi, rte_eth_dev_socket_id(pi));
1540         rte_eth_promiscuous_enable(pi);
1541
1542         nb_ports = rte_eth_dev_count();
1543
1544         ports[pi].port_status = RTE_PORT_STOPPED;
1545
1546         printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1547         printf("Done\n");
1548 }
1549
1550 void
1551 detach_port(uint8_t port_id)
1552 {
1553         char name[RTE_ETH_NAME_MAX_LEN];
1554
1555         printf("Detaching a port...\n");
1556
1557         if (!port_is_closed(port_id)) {
1558                 printf("Please close port first\n");
1559                 return;
1560         }
1561
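        /* Detach the device; its name is returned for the report below. */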
1562         if (rte_eth_dev_detach(port_id, name))
1563                 return;
1564
1565         ports[port_id].enabled = 0;
1566         nb_ports = rte_eth_dev_count();
1567
1568         printf("Port '%s' is detached. Total number of ports is now %d\n",
1569                         name, nb_ports);
1570         printf("Done\n");
1571         return;
1572 }
1573
1574 void
1575 pmd_test_exit(void)
1576 {
1577         portid_t pt_id;
1578
1579         if (test_done == 0)
1580                 stop_packet_forwarding();
1581
1582         if (ports != NULL) {
1583                 no_link_check = 1;
1584                 FOREACH_PORT(pt_id, ports) {
1585                         printf("\nShutting down port %d...\n", pt_id);
1586                         fflush(stdout);
1587                         stop_port(pt_id);
1588                         close_port(pt_id);
1589                 }
1590         }
1591         printf("\nBye...\n");
1592 }
1593
1594 typedef void (*cmd_func_t)(void);
1595 struct pmd_test_command {
1596         const char *cmd_name;
1597         cmd_func_t cmd_func;
1598 };
1599
1600 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1601
1602 /* Poll the link status of all ports for up to 9 s and print the final status of each */
1603 static void
1604 check_all_ports_link_status(uint32_t port_mask)
1605 {
1606 #define CHECK_INTERVAL 100 /* 100ms */
1607 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1608         uint8_t portid, count, all_ports_up, print_flag = 0;
1609         struct rte_eth_link link;
1610
1611         printf("Checking link statuses...\n");
1612         fflush(stdout);
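        /* Poll every CHECK_INTERVAL ms until all selected ports report link up
         * or MAX_CHECK_TIME intervals have elapsed, then do one last pass that
         * prints the status of each port.
         */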
1613         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1614                 all_ports_up = 1;
1615                 FOREACH_PORT(portid, ports) {
1616                         if ((port_mask & (1 << portid)) == 0)
1617                                 continue;
1618                         memset(&link, 0, sizeof(link));
1619                         rte_eth_link_get_nowait(portid, &link);
1620                         /* print link status if flag set */
1621                         if (print_flag == 1) {
1622                                 if (link.link_status)
1623                                         printf("Port %d Link Up - speed %u "
1624                                                 "Mbps - %s\n", (uint8_t)portid,
1625                                                 (unsigned)link.link_speed,
1626                                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1627                                                 ("full-duplex") : ("half-duplex"));
1628                                 else
1629                                         printf("Port %d Link Down\n",
1630                                                 (uint8_t)portid);
1631                                 continue;
1632                         }
1633                         /* clear all_ports_up flag if any link down */
1634                         if (link.link_status == ETH_LINK_DOWN) {
1635                                 all_ports_up = 0;
1636                                 break;
1637                         }
1638                 }
1639                 /* after finally printing all link status, get out */
1640                 if (print_flag == 1)
1641                         break;
1642
1643                 if (all_ports_up == 0) {
1644                         fflush(stdout);
1645                         rte_delay_ms(CHECK_INTERVAL);
1646                 }
1647
1648                 /* set the print_flag if all ports up or timeout */
1649                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1650                         print_flag = 1;
1651                 }
1652         }
1653 }
1654
1655 static int
1656 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1657 {
1658         uint16_t i;
1659         int diag;
1660         uint8_t mapping_found = 0;
1661
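        /* Program each configured TX queue to stats-counter mapping that
         * targets this port and a valid queue index.
         */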
1662         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1663                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1664                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1665                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1666                                         tx_queue_stats_mappings[i].queue_id,
1667                                         tx_queue_stats_mappings[i].stats_counter_id);
1668                         if (diag != 0)
1669                                 return diag;
1670                         mapping_found = 1;
1671                 }
1672         }
1673         if (mapping_found)
1674                 port->tx_queue_stats_mapping_enabled = 1;
1675         return 0;
1676 }
1677
1678 static int
1679 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1680 {
1681         uint16_t i;
1682         int diag;
1683         uint8_t mapping_found = 0;
1684
1685         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1686                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1687                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1688                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1689                                         rx_queue_stats_mappings[i].queue_id,
1690                                         rx_queue_stats_mappings[i].stats_counter_id);
1691                         if (diag != 0)
1692                                 return diag;
1693                         mapping_found = 1;
1694                 }
1695         }
1696         if (mapping_found)
1697                 port->rx_queue_stats_mapping_enabled = 1;
1698         return 0;
1699 }
1700
1701 static void
1702 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1703 {
1704         int diag = 0;
1705
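        /* -ENOTSUP only disables the mapping on this port; any other error
         * is treated as fatal.
         */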
1706         diag = set_tx_queue_stats_mapping_registers(pi, port);
1707         if (diag != 0) {
1708                 if (diag == -ENOTSUP) {
1709                         port->tx_queue_stats_mapping_enabled = 0;
1710                         printf("TX queue stats mapping not supported for port id=%d\n", pi);
1711                 }
1712                 else
1713                         rte_exit(EXIT_FAILURE,
1714                                         "set_tx_queue_stats_mapping_registers "
1715                                         "failed for port id=%d diag=%d\n",
1716                                         pi, diag);
1717         }
1718
1719         diag = set_rx_queue_stats_mapping_registers(pi, port);
1720         if (diag != 0) {
1721                 if (diag == -ENOTSUP) {
1722                         port->rx_queue_stats_mapping_enabled = 0;
1723                         printf("RX queue stats mapping not supported for port id=%d\n", pi);
1724                 }
1725                 else
1726                         rte_exit(EXIT_FAILURE,
1727                                         "set_rx_queue_stats_mapping_registers "
1728                                         "failed for port id=%d diag=%d\n",
1729                                         pi, diag);
1730         }
1731 }
1732
1733 static void
1734 rxtx_port_config(struct rte_port *port)
1735 {
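        /* Start from the defaults advertised by the PMD, then apply any
         * values passed on the command line.
         */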
1736         port->rx_conf = port->dev_info.default_rxconf;
1737         port->tx_conf = port->dev_info.default_txconf;
1738
1739         /* Check if any RX/TX parameters have been passed */
1740         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1741                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1742
1743         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1744                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1745
1746         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1747                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1748
1749         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1750                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1751
1752         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1753                 port->rx_conf.rx_drop_en = rx_drop_en;
1754
1755         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1756                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1757
1758         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1759                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1760
1761         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1762                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1763
1764         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1765                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1766
1767         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1768                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1769
1770         if (txq_flags != RTE_PMD_PARAM_UNSET)
1771                 port->tx_conf.txq_flags = txq_flags;
1772 }
1773
1774 void
1775 init_port_config(void)
1776 {
1777         portid_t pid;
1778         struct rte_port *port;
1779
1780         FOREACH_PORT(pid, ports) {
1781                 port = &ports[pid];
1782                 port->dev_conf.rxmode = rx_mode;
1783                 port->dev_conf.fdir_conf = fdir_conf;
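                /* Enable RSS hashing only when more than one RX queue is used. */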
1784                 if (nb_rxq > 1) {
1785                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1786                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1787                 } else {
1788                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1789                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1790                 }
1791
1792                 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1793                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1794                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1795                         else
1796                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1797                 }
1798
1799                 if (port->dev_info.max_vfs != 0) {
1800                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1801                                 port->dev_conf.rxmode.mq_mode =
1802                                         ETH_MQ_RX_VMDQ_RSS;
1803                         else
1804                                 port->dev_conf.rxmode.mq_mode =
1805                                         ETH_MQ_RX_NONE;
1806
1807                         port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1808                 }
1809
1810                 rxtx_port_config(port);
1811
1812                 rte_eth_macaddr_get(pid, &port->eth_addr);
1813
1814                 map_port_queue_stats_mapping_registers(pid, port);
1815 #ifdef RTE_NIC_BYPASS
1816                 rte_eth_dev_bypass_init(pid);
1817 #endif
1818         }
1819 }
1820
1821 void set_port_slave_flag(portid_t slave_pid)
1822 {
1823         struct rte_port *port;
1824
1825         port = &ports[slave_pid];
1826         port->slave_flag = 1;
1827 }
1828
1829 void clear_port_slave_flag(portid_t slave_pid)
1830 {
1831         struct rte_port *port;
1832
1833         port = &ports[slave_pid];
1834         port->slave_flag = 0;
1835 }
1836
1837 uint8_t port_is_bonding_slave(portid_t slave_pid)
1838 {
1839         struct rte_port *port;
1840
1841         port = &ports[slave_pid];
1842         return port->slave_flag;
1843 }
1844
1845 const uint16_t vlan_tags[] = {
1846                 0,  1,  2,  3,  4,  5,  6,  7,
1847                 8,  9, 10, 11,  12, 13, 14, 15,
1848                 16, 17, 18, 19, 20, 21, 22, 23,
1849                 24, 25, 26, 27, 28, 29, 30, 31
1850 };
1851
1852 static  int
1853 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1854                  enum dcb_mode_enable dcb_mode,
1855                  enum rte_eth_nb_tcs num_tcs,
1856                  uint8_t pfc_en)
1857 {
1858         uint8_t i;
1859
1860         /*
1861          * Builds up the correct configuration for dcb+vt based on the vlan tags array
1862          * given above, and the number of traffic classes available for use.
1863          */
1864         if (dcb_mode == DCB_VT_ENABLED) {
1865                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1866                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
1867                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1868                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1869
1870                 /* VMDQ+DCB RX and TX configurations */
1871                 vmdq_rx_conf->enable_default_pool = 0;
1872                 vmdq_rx_conf->default_pool = 0;
1873                 vmdq_rx_conf->nb_queue_pools =
1874                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1875                 vmdq_tx_conf->nb_queue_pools =
1876                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1877
1878                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
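                /* Map one VLAN tag per pool map entry, cycling round-robin
                 * over the configured pools.
                 */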
1879                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1880                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1881                         vmdq_rx_conf->pool_map[i].pools =
1882                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
1883                 }
1884                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1885                         vmdq_rx_conf->dcb_tc[i] = i;
1886                         vmdq_tx_conf->dcb_tc[i] = i;
1887                 }
1888
1889                 /* set DCB mode of RX and TX of multiple queues */
1890                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1891                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1892         } else {
1893                 struct rte_eth_dcb_rx_conf *rx_conf =
1894                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
1895                 struct rte_eth_dcb_tx_conf *tx_conf =
1896                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
1897
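                /* Plain DCB (no VT): one entry per traffic class, with RSS
                 * distributing packets inside each traffic class.
                 */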
1898                 rx_conf->nb_tcs = num_tcs;
1899                 tx_conf->nb_tcs = num_tcs;
1900
1901                 for (i = 0; i < num_tcs; i++) {
1902                         rx_conf->dcb_tc[i] = i;
1903                         tx_conf->dcb_tc[i] = i;
1904                 }
1905                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1906                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1907                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1908         }
1909
1910         if (pfc_en)
1911                 eth_conf->dcb_capability_en =
1912                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1913         else
1914                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1915
1916         return 0;
1917 }
1918
1919 int
1920 init_port_dcb_config(portid_t pid,
1921                      enum dcb_mode_enable dcb_mode,
1922                      enum rte_eth_nb_tcs num_tcs,
1923                      uint8_t pfc_en)
1924 {
1925         struct rte_eth_conf port_conf;
1926         struct rte_eth_dev_info dev_info;
1927         struct rte_port *rte_port;
1928         int retval;
1929         uint16_t i;
1930
1931         rte_eth_dev_info_get(pid, &dev_info);
1932
1933         /* If dev_info.vmdq_pool_base is greater than 0,
1934          * the queue IDs of the VMDq pools start after the PF queues.
1935          */
1936         if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
1937                 printf("VMDQ_DCB multi-queue mode is nonsensical"
1938                         " for port %d.\n", pid);
1939                 return -1;
1940         }
1941
1942         /* Assume the ports in testpmd have the same DCB capability
1943          * and the same number of RX and TX queues in DCB mode.
1944          */
1945         if (dcb_mode == DCB_VT_ENABLED) {
1946                 nb_rxq = dev_info.max_rx_queues;
1947                 nb_txq = dev_info.max_tx_queues;
1948         } else {
1949                 /* if VT is disabled, use all PF queues */
1950                 if (dev_info.vmdq_pool_base == 0) {
1951                         nb_rxq = dev_info.max_rx_queues;
1952                         nb_txq = dev_info.max_tx_queues;
1953                 } else {
1954                         nb_rxq = (queueid_t)num_tcs;
1955                         nb_txq = (queueid_t)num_tcs;
1956
1957                 }
1958         }
1959         rx_free_thresh = 64;
1960
1961         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1962         /* Enter DCB configuration status */
1963         dcb_config = 1;
1964
1965         /* set configuration of DCB in VT mode and DCB in non-VT mode */
1966         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
1967         if (retval < 0)
1968                 return retval;
1969
1970         rte_port = &ports[pid];
1971         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1972
1973         rxtx_port_config(rte_port);
1974         /* VLAN filter */
1975         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1976         for (i = 0; i < RTE_DIM(vlan_tags); i++)
1977                 rx_vft_set(pid, vlan_tags[i], 1);
1978
1979         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1980         map_port_queue_stats_mapping_registers(pid, rte_port);
1981
1982         rte_port->dcb_flag = 1;
1983
1984         return 0;
1985 }
1986
1987 static void
1988 init_port(void)
1989 {
1990         portid_t pid;
1991
1992         /* Configuration of Ethernet ports. */
1993         ports = rte_zmalloc("testpmd: ports",
1994                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
1995                             RTE_CACHE_LINE_SIZE);
1996         if (ports == NULL) {
1997                 rte_exit(EXIT_FAILURE,
1998                                 "rte_zmalloc(%d struct rte_port) failed\n",
1999                                 RTE_MAX_ETHPORTS);
2000         }
2001
2002         /* enable allocated ports */
2003         for (pid = 0; pid < nb_ports; pid++)
2004                 ports[pid].enabled = 1;
2005 }
2006
2007 static void
2008 force_quit(void)
2009 {
2010         pmd_test_exit();
2011         prompt_exit();
2012 }
2013
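/* Shut down cleanly on SIGINT/SIGTERM, then re-raise the signal with the
 * default handler so the process exits with the conventional status.
 */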
2014 static void
2015 signal_handler(int signum)
2016 {
2017         if (signum == SIGINT || signum == SIGTERM) {
2018                 printf("\nSignal %d received, preparing to exit...\n",
2019                                 signum);
2020                 force_quit();
2021                 /* exit with the expected status */
2022                 signal(signum, SIG_DFL);
2023                 kill(getpid(), signum);
2024         }
2025 }
2026
2027 int
2028 main(int argc, char** argv)
2029 {
2030         int  diag;
2031         uint8_t port_id;
2032
2033         signal(SIGINT, signal_handler);
2034         signal(SIGTERM, signal_handler);
2035
2036         diag = rte_eal_init(argc, argv);
2037         if (diag < 0)
2038                 rte_panic("Cannot init EAL\n");
2039
2040         nb_ports = (portid_t) rte_eth_dev_count();
2041         if (nb_ports == 0)
2042                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2043
2044         /* allocate port structures, and init them */
2045         init_port();
2046
2047         set_def_fwd_config();
2048         if (nb_lcores == 0)
2049                 rte_panic("Empty set of forwarding logical cores - check the "
2050                           "core mask supplied in the command parameters\n");
2051
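        /* rte_eal_init() returned the number of arguments it consumed; skip
         * them and hand the remaining options to the testpmd parser.
         */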
2052         argc -= diag;
2053         argv += diag;
2054         if (argc > 1)
2055                 launch_args_parse(argc, argv);
2056
2057         if (!nb_rxq && !nb_txq)
2058                 printf("Warning: Either rx or tx queues should be non-zero\n");
2059
2060         if (nb_rxq > 1 && nb_rxq > nb_txq)
2061                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2062                        "but nb_txq=%d will prevent it from being fully tested.\n",
2063                        nb_rxq, nb_txq);
2064
2065         init_config();
2066         if (start_port(RTE_PORT_ALL) != 0)
2067                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2068
2069         /* set all ports to promiscuous mode by default */
2070         FOREACH_PORT(port_id, ports)
2071                 rte_eth_promiscuous_enable(port_id);
2072
2073 #ifdef RTE_LIBRTE_CMDLINE
2074         if (interactive == 1) {
2075                 if (auto_start) {
2076                         printf("Start automatic packet forwarding\n");
2077                         start_packet_forwarding(0);
2078                 }
2079                 prompt();
2080         } else
2081 #endif
2082         {
2083                 char c;
2084                 int rc;
2085
2086                 printf("No command-line core given, starting packet forwarding\n");
2087                 start_packet_forwarding(0);
2088                 printf("Press enter to exit\n");
2089                 rc = read(0, &c, 1);
2090                 pmd_test_exit();
2091                 if (rc < 0)
2092                         return 1;
2093         }
2094
2095         return 0;
2096 }