f22d1b6f61a3d38adf9646b7edf87f81c98c5bf5
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
78 #endif
79
80 #include "testpmd.h"
81
82 uint16_t verbose_level = 0; /**< Silent by default. */
83
84 /* Use the master core for the command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
87
88 /*
89  * NUMA support configuration.
90  * When set, NUMA support attempts to dispatch the allocation of the
91  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92  * probed ports, among CPU sockets 0 and 1.
93  * Otherwise, all memory is allocated from CPU socket 0.
94  */
95 uint8_t numa_support = 0; /**< No numa support by default */
96
97 /*
98  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99  * not configured.
100  */
101 uint8_t socket_num = UMA_NO_CONFIG;
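/*
 * Illustrative examples only; the option names are taken from the comment
 * above and from the warning printed by check_socket_id() below:
 * "--numa" enables NUMA-aware allocation, optionally refined with
 * "--port-numa-config" and "--ring-numa-config", while e.g.
 * "--socket-num=1" makes UMA mode allocate from socket 1 instead of 0.
 */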
102
103 /*
104  * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
105  */
106 uint8_t mp_anon = 0;
107
108 /*
109  * Record the Ethernet address of peer target ports to which packets are
110  * forwarded.
111  * Must be instantiated with the Ethernet addresses of the peer traffic
112  * generator ports.
113  */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
116
117 /*
118  * Probed Target Environment.
119  */
120 struct rte_port *ports;        /**< For all probed ethernet ports. */
121 portid_t nb_ports;             /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124
125 /*
126  * Test Forwarding Configuration.
127  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129  */
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
134
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
140
141 /*
142  * Forwarding engines.
143  */
144 struct fwd_engine * fwd_engines[] = {
145         &io_fwd_engine,
146         &mac_fwd_engine,
147         &mac_retry_fwd_engine,
148         &mac_swap_engine,
149         &flow_gen_engine,
150         &rx_only_engine,
151         &tx_only_engine,
152         &csum_fwd_engine,
153         &icmp_echo_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155         &ieee1588_fwd_engine,
156 #endif
157         NULL,
158 };
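/*
 * Each engine above provides a fwd_mode_name (e.g. "rxonly", "txonly",
 * compared by name in start_packet_forwarding() below) and a packet_fwd
 * callback executed by the forwarding lcores. cur_fwd_eng selects the
 * active engine; switching engines at run time is assumed to happen
 * through the command-line interface implemented outside this file
 * (e.g. a "set fwd <mode>" style command).
 */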
159
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
162
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
165                                       * specified on command-line. */
166
167 /*
168  * Configuration of packet segments used by the "txonly" processing engine.
169  */
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172         TXONLY_DEF_PACKET_LEN,
173 };
174 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
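/*
 * Illustrative, non-default example: tx_pkt_seg_lengths[] = {64, 64},
 * tx_pkt_nb_segs = 2 and tx_pkt_length = 128 make the "txonly" engine
 * build 128-byte packets spread over two 64-byte mbuf segments.
 */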
175
176 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
177 /**< Split policy for packets to TX. */
178
179 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
180 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
181
182 /* Whether the current configuration is in DCB mode; 0 means it is not. */
183 uint8_t dcb_config = 0;
184
185 /* Whether DCB is in testing state. */
186 uint8_t dcb_test = 0;
187
188 /*
189  * Configurable number of RX/TX queues.
190  */
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193
194 /*
195  * Configurable number of RX/TX ring descriptors.
196  */
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
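/*
 * These defaults are sized for functional testing; they are normally
 * overridden from the command line (testpmd's option parsing, which is
 * not part of this file, is assumed to expose switches such as --rxd
 * and --txd for this purpose).
 */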
201
202 #define RTE_PMD_PARAM_UNSET -1
203 /*
204  * Configurable values of RX and TX ring threshold registers.
205  */
206
207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
210
211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
214
215 /*
216  * Configurable value of RX free threshold.
217  */
218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
219
220 /*
221  * Configurable value of RX drop enable.
222  */
223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
224
225 /*
226  * Configurable value of TX free threshold.
227  */
228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
229
230 /*
231  * Configurable value of TX RS bit threshold.
232  */
233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
234
235 /*
236  * Configurable value of TX queue flags.
237  */
238 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
239
240 /*
241  * Receive Side Scaling (RSS) configuration.
242  */
243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
244
245 /*
246  * Port topology configuration
247  */
248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
249
250 /*
251  * Avoid flushing all the RX streams before starting forwarding.
252  */
253 uint8_t no_flush_rx = 0; /* flush by default */
254
255 /*
256  * Avoid checking the link status when starting/stopping a port.
257  */
258 uint8_t no_link_check = 0; /* check by default */
259
260 /*
261  * NIC bypass mode configuration options.
262  */
263 #ifdef RTE_NIC_BYPASS
264
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
267
268 #endif
269
270 /*
271  * Ethernet device configuration.
272  */
273 struct rte_eth_rxmode rx_mode = {
274         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
275         .split_hdr_size = 0,
276         .header_split   = 0, /**< Header Split disabled. */
277         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
280         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
282         .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
283 };
284
285 struct rte_fdir_conf fdir_conf = {
286         .mode = RTE_FDIR_MODE_NONE,
287         .pballoc = RTE_FDIR_PBALLOC_64K,
288         .status = RTE_FDIR_REPORT_STATUS,
289         .mask = {
290                 .vlan_tci_mask = 0x0,
291                 .ipv4_mask     = {
292                         .src_ip = 0xFFFFFFFF,
293                         .dst_ip = 0xFFFFFFFF,
294                 },
295                 .ipv6_mask     = {
296                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
297                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
298                 },
299                 .src_port_mask = 0xFFFF,
300                 .dst_port_mask = 0xFFFF,
301                 .mac_addr_byte_mask = 0xFF,
302                 .tunnel_type_mask = 1,
303                 .tunnel_id_mask = 0xFFFFFFFF,
304         },
305         .drop_queue = 127,
306 };
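/*
 * Interpretation of the defaults above (based on the flow director API,
 * not stated in this file): the all-ones masks request matching on the
 * full width of the corresponding fields, and drop_queue is the RX queue
 * that receives packets hitting a "drop" filter once a flow director
 * mode other than RTE_FDIR_MODE_NONE is selected.
 */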
307
308 volatile int test_done = 1; /* nonzero when packet forwarding is stopped. */
309
310 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
311 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
312
313 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
314 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
315
316 uint16_t nb_tx_queue_stats_mappings = 0;
317 uint16_t nb_rx_queue_stats_mappings = 0;
318
319 unsigned max_socket = 0;
320
321 /* Forward function declarations */
322 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
323 static void check_all_ports_link_status(uint32_t port_mask);
324
325 /*
326  * Check if all the ports are started.
327  * If yes, return a positive value. If not, return zero.
328  */
329 static int all_ports_started(void);
330
331 /*
332  * Find next enabled port
333  */
334 portid_t
335 find_next_port(portid_t p, struct rte_port *ports, int size)
336 {
337         if (ports == NULL)
338                 rte_exit(-EINVAL, "failed to find a next port id\n");
339
340         while ((p < size) && (ports[p].enabled == 0))
341                 p++;
342         return p;
343 }
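/*
 * find_next_port() is the building block behind the FOREACH_PORT() macro
 * used throughout this file. A minimal sketch of such a macro (the real
 * definition lives in testpmd.h and may differ in detail):
 *
 *   #define FOREACH_PORT(p, ports) \
 *       for ((p) = find_next_port(0, (ports), RTE_MAX_ETHPORTS); \
 *            (p) < RTE_MAX_ETHPORTS; \
 *            (p) = find_next_port((p) + 1, (ports), RTE_MAX_ETHPORTS))
 */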
344
345 /*
346  * Setup default configuration.
347  */
348 static void
349 set_default_fwd_lcores_config(void)
350 {
351         unsigned int i;
352         unsigned int nb_lc;
353         unsigned int sock_num;
354
355         nb_lc = 0;
356         for (i = 0; i < RTE_MAX_LCORE; i++) {
357                 sock_num = rte_lcore_to_socket_id(i) + 1;
358                 if (sock_num > max_socket) {
359                         if (sock_num > RTE_MAX_NUMA_NODES)
360                                 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
361                         max_socket = sock_num;
362                 }
363                 if (!rte_lcore_is_enabled(i))
364                         continue;
365                 if (i == rte_get_master_lcore())
366                         continue;
367                 fwd_lcores_cpuids[nb_lc++] = i;
368         }
369         nb_lcores = (lcoreid_t) nb_lc;
370         nb_cfg_lcores = nb_lcores;
371         nb_fwd_lcores = 1;
372 }
373
374 static void
375 set_def_peer_eth_addrs(void)
376 {
377         portid_t i;
378
379         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
380                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
381                 peer_eth_addrs[i].addr_bytes[5] = i;
382         }
383 }
384
385 static void
386 set_default_fwd_ports_config(void)
387 {
388         portid_t pt_id;
389
390         for (pt_id = 0; pt_id < nb_ports; pt_id++)
391                 fwd_ports_ids[pt_id] = pt_id;
392
393         nb_cfg_ports = nb_ports;
394         nb_fwd_ports = nb_ports;
395 }
396
397 void
398 set_def_fwd_config(void)
399 {
400         set_default_fwd_lcores_config();
401         set_def_peer_eth_addrs();
402         set_default_fwd_ports_config();
403 }
404
405 /*
406  * Configuration initialisation done once at init time.
407  */
408 static void
409 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
410                  unsigned int socket_id)
411 {
412         char pool_name[RTE_MEMPOOL_NAMESIZE];
413         struct rte_mempool *rte_mp = NULL;
414         uint32_t mb_size;
415
416         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
417         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
418
419         RTE_LOG(INFO, USER1,
420                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
421                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
422
423 #ifdef RTE_LIBRTE_PMD_XENVIRT
424         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
425                 (unsigned) mb_mempool_cache,
426                 sizeof(struct rte_pktmbuf_pool_private),
427                 rte_pktmbuf_pool_init, NULL,
428                 rte_pktmbuf_init, NULL,
429                 socket_id, 0);
430 #endif
431
432         /* If the XEN allocation above failed, fall back to a normal allocation. */
433         if (rte_mp == NULL) {
434                 if (mp_anon != 0) {
435                         rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
436                                 mb_size, (unsigned) mb_mempool_cache,
437                                 sizeof(struct rte_pktmbuf_pool_private),
438                                 socket_id, 0);
439
440                         if (rte_mp == NULL ||
                                rte_mempool_populate_anon(rte_mp) == 0) {
441                                 rte_mempool_free(rte_mp);
442                                 rte_mp = NULL;
443                         } else {
444                                 rte_pktmbuf_pool_init(rte_mp, NULL);
445                                 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
                            }
446                 } else {
447                         /* wrapper to rte_mempool_create() */
448                         rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
449                                 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
450                 }
451         }
452
453         if (rte_mp == NULL) {
454                 rte_exit(EXIT_FAILURE,
455                         "Creation of mbuf pool for socket %u failed: %s\n",
456                         socket_id, rte_strerror(rte_errno));
457         } else if (verbose_level > 0) {
458                 rte_mempool_dump(stdout, rte_mp);
459         }
460 }
461
462 /*
463  * Check whether a given socket id is valid in NUMA mode.
464  * If valid, return 0; otherwise return -1.
465  */
466 static int
467 check_socket_id(const unsigned int socket_id)
468 {
469         static int warning_once = 0;
470
471         if (socket_id >= max_socket) {
472                 if (!warning_once && numa_support)
473                         printf("Warning: NUMA should be configured manually by"
474                                " using --port-numa-config and"
475                                " --ring-numa-config parameters along with"
476                                " --numa.\n");
477                 warning_once = 1;
478                 return -1;
479         }
480         return 0;
481 }
482
483 static void
484 init_config(void)
485 {
486         portid_t pid;
487         struct rte_port *port;
488         struct rte_mempool *mbp;
489         unsigned int nb_mbuf_per_pool;
490         lcoreid_t  lc_id;
491         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
492
493         memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
494         /* Configuration of logical cores. */
495         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
496                                 sizeof(struct fwd_lcore *) * nb_lcores,
497                                 RTE_CACHE_LINE_SIZE);
498         if (fwd_lcores == NULL) {
499                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
500                                                         "failed\n", nb_lcores);
501         }
502         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
503                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
504                                                sizeof(struct fwd_lcore),
505                                                RTE_CACHE_LINE_SIZE);
506                 if (fwd_lcores[lc_id] == NULL) {
507                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
508                                                                 "failed\n");
509                 }
510                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
511         }
512
513         /*
514          * Create the mbuf pools.
515          * If NUMA support is disabled, create a single mbuf pool in
516          * socket 0 memory by default.
517          * Otherwise, create one mbuf pool in the memory of each CPU socket in use.
518          *
519          * Size the pools for the maximum values of nb_rxd and nb_txd, so that
520          * nb_rxd and nb_txd can still be reconfigured at run time.
521          */
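        /*
         * Sizing example derived from the expression below (illustrative
         * numbers): with nb_lcores == 4, one pool is sized for
         * RTE_TEST_RX_DESC_MAX + RTE_TEST_TX_DESC_MAX +
         * 4 * mb_mempool_cache + MAX_PKT_BURST mbufs; without NUMA support
         * this count is further multiplied by RTE_MAX_ETHPORTS so that a
         * single socket-0 pool can serve every port.
         */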
522         if (param_total_num_mbufs)
523                 nb_mbuf_per_pool = param_total_num_mbufs;
524         else {
525                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
526                                 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
527
528                 if (!numa_support)
529                         nb_mbuf_per_pool =
530                                 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
531         }
532
533         if (!numa_support) {
534                 if (socket_num == UMA_NO_CONFIG)
535                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
536                 else
537                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
538                                                  socket_num);
539         }
540
541         FOREACH_PORT(pid, ports) {
542                 port = &ports[pid];
543                 rte_eth_dev_info_get(pid, &port->dev_info);
544
545                 if (numa_support) {
546                         if (port_numa[pid] != NUMA_NO_CONFIG)
547                                 port_per_socket[port_numa[pid]]++;
548                         else {
549                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
550
551                                 /* if socket_id is invalid, set to 0 */
552                                 if (check_socket_id(socket_id) < 0)
553                                         socket_id = 0;
554                                 port_per_socket[socket_id]++;
555                         }
556                 }
557
558                 /* set flag to initialize port/queue */
559                 port->need_reconfig = 1;
560                 port->need_reconfig_queues = 1;
561         }
562
563         if (numa_support) {
564                 uint8_t i;
565                 unsigned int nb_mbuf;
566
567                 if (param_total_num_mbufs)
568                         nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
569
570                 for (i = 0; i < max_socket; i++) {
571                         nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
572                         if (nb_mbuf)
573                                 mbuf_pool_create(mbuf_data_size,
574                                                 nb_mbuf,i);
575                 }
576         }
577         init_port_config();
578
579         /*
580          * Record which mbuf pool each logical core should use, if needed.
581          */
582         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
583                 mbp = mbuf_pool_find(
584                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
585
586                 if (mbp == NULL)
587                         mbp = mbuf_pool_find(0);
588                 fwd_lcores[lc_id]->mbp = mbp;
589         }
590
591         /* Configuration of packet forwarding streams. */
592         if (init_fwd_streams() < 0)
593                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
594 }
595
596
597 void
598 reconfig(portid_t new_port_id, unsigned socket_id)
599 {
600         struct rte_port *port;
601
602         /* Reconfiguration of Ethernet ports. */
603         port = &ports[new_port_id];
604         rte_eth_dev_info_get(new_port_id, &port->dev_info);
605
606         /* set flag to initialize port/queue */
607         port->need_reconfig = 1;
608         port->need_reconfig_queues = 1;
609         port->socket_id = socket_id;
610
611         init_port_config();
612 }
613
614
615 int
616 init_fwd_streams(void)
617 {
618         portid_t pid;
619         struct rte_port *port;
620         streamid_t sm_id, nb_fwd_streams_new;
621         queueid_t q;
622
623         /* Set each port's socket id according to whether NUMA is used. */
624         FOREACH_PORT(pid, ports) {
625                 port = &ports[pid];
626                 if (nb_rxq > port->dev_info.max_rx_queues) {
627                         printf("Fail: nb_rxq(%d) is greater than "
628                                 "max_rx_queues(%d)\n", nb_rxq,
629                                 port->dev_info.max_rx_queues);
630                         return -1;
631                 }
632                 if (nb_txq > port->dev_info.max_tx_queues) {
633                         printf("Fail: nb_txq(%d) is greater than "
634                                 "max_tx_queues(%d)\n", nb_txq,
635                                 port->dev_info.max_tx_queues);
636                         return -1;
637                 }
638                 if (numa_support) {
639                         if (port_numa[pid] != NUMA_NO_CONFIG)
640                                 port->socket_id = port_numa[pid];
641                         else {
642                                 port->socket_id = rte_eth_dev_socket_id(pid);
643
644                                 /* if socket_id is invalid, set to 0 */
645                                 if (check_socket_id(port->socket_id) < 0)
646                                         port->socket_id = 0;
647                         }
648                 }
649                 else {
650                         if (socket_num == UMA_NO_CONFIG)
651                                 port->socket_id = 0;
652                         else
653                                 port->socket_id = socket_num;
654                 }
655         }
656
657         q = RTE_MAX(nb_rxq, nb_txq);
658         if (q == 0) {
659                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
660                 return -1;
661         }
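        /*
         * One forwarding stream is allocated per port and per queue slot.
         * Worked example: with 2 ports, nb_rxq = 4 and nb_txq = 2, q is 4
         * and 8 forwarding streams are created.
         */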
662         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
663         if (nb_fwd_streams_new == nb_fwd_streams)
664                 return 0;
665         /* clear the old */
666         if (fwd_streams != NULL) {
667                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
668                         if (fwd_streams[sm_id] == NULL)
669                                 continue;
670                         rte_free(fwd_streams[sm_id]);
671                         fwd_streams[sm_id] = NULL;
672                 }
673                 rte_free(fwd_streams);
674                 fwd_streams = NULL;
675         }
676
677         /* init new */
678         nb_fwd_streams = nb_fwd_streams_new;
679         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
680                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
681         if (fwd_streams == NULL)
682                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
683                                                 "failed\n", nb_fwd_streams);
684
685         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
686                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
687                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
688                 if (fwd_streams[sm_id] == NULL)
689                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
690                                                                 " failed\n");
691         }
692
693         return 0;
694 }
695
696 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
697 static void
698 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
699 {
700         unsigned int total_burst;
701         unsigned int nb_burst;
702         unsigned int burst_stats[3];
703         uint16_t pktnb_stats[3];
704         uint16_t nb_pkt;
705         int burst_percent[3];
706
707         /*
708          * First compute the total number of packet bursts and the
709          * two highest numbers of bursts of the same number of packets.
710          */
711         total_burst = 0;
712         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
713         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
714         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
715                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
716                 if (nb_burst == 0)
717                         continue;
718                 total_burst += nb_burst;
719                 if (nb_burst > burst_stats[0]) {
720                         burst_stats[1] = burst_stats[0];
721                         pktnb_stats[1] = pktnb_stats[0];
722                         burst_stats[0] = nb_burst;
723                         pktnb_stats[0] = nb_pkt;
724                 } else if (nb_burst > burst_stats[1]) {
                            burst_stats[1] = nb_burst;
                            pktnb_stats[1] = nb_pkt;
                        }
725         }
726         if (total_burst == 0)
727                 return;
728         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
729         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
730                burst_percent[0], (int) pktnb_stats[0]);
731         if (burst_stats[0] == total_burst) {
732                 printf("]\n");
733                 return;
734         }
735         if (burst_stats[0] + burst_stats[1] == total_burst) {
736                 printf(" + %d%% of %d pkts]\n",
737                        100 - burst_percent[0], pktnb_stats[1]);
738                 return;
739         }
740         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
741         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
742         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
743                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
744                 return;
745         }
746         printf(" + %d%% of %d pkts + %d%% of others]\n",
747                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
748 }
749 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
750
751 static void
752 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
753 {
754         struct rte_port *port;
755         uint8_t i;
756
757         static const char *fwd_stats_border = "----------------------";
758
759         port = &ports[port_id];
760         printf("\n  %s Forward statistics for port %-2d %s\n",
761                fwd_stats_border, port_id, fwd_stats_border);
762
763         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
764                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
765                        "%-"PRIu64"\n",
766                        stats->ipackets, stats->imissed,
767                        (uint64_t) (stats->ipackets + stats->imissed));
768
769                 if (cur_fwd_eng == &csum_fwd_engine)
770                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
771                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
772                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
773                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
774                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
775                 }
776
777                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
778                        "%-"PRIu64"\n",
779                        stats->opackets, port->tx_dropped,
780                        (uint64_t) (stats->opackets + port->tx_dropped));
781         }
782         else {
783                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
784                        "%14"PRIu64"\n",
785                        stats->ipackets, stats->imissed,
786                        (uint64_t) (stats->ipackets + stats->imissed));
787
788                 if (cur_fwd_eng == &csum_fwd_engine)
789                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
790                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
791                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
792                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
793                         printf("  RX-nombufs:             %14"PRIu64"\n",
794                                stats->rx_nombuf);
795                 }
796
797                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
798                        "%14"PRIu64"\n",
799                        stats->opackets, port->tx_dropped,
800                        (uint64_t) (stats->opackets + port->tx_dropped));
801         }
802
803 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
804         if (port->rx_stream)
805                 pkt_burst_stats_display("RX",
806                         &port->rx_stream->rx_burst_stats);
807         if (port->tx_stream)
808                 pkt_burst_stats_display("TX",
809                         &port->tx_stream->tx_burst_stats);
810 #endif
811
812         if (port->rx_queue_stats_mapping_enabled) {
813                 printf("\n");
814                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
815                         printf("  Stats reg %2d RX-packets:%14"PRIu64
816                                "     RX-errors:%14"PRIu64
817                                "    RX-bytes:%14"PRIu64"\n",
818                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
819                 }
820                 printf("\n");
821         }
822         if (port->tx_queue_stats_mapping_enabled) {
823                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
824                         printf("  Stats reg %2d TX-packets:%14"PRIu64
825                                "                                 TX-bytes:%14"PRIu64"\n",
826                                i, stats->q_opackets[i], stats->q_obytes[i]);
827                 }
828         }
829
830         printf("  %s--------------------------------%s\n",
831                fwd_stats_border, fwd_stats_border);
832 }
833
834 static void
835 fwd_stream_stats_display(streamid_t stream_id)
836 {
837         struct fwd_stream *fs;
838         static const char *fwd_top_stats_border = "-------";
839
840         fs = fwd_streams[stream_id];
841         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
842             (fs->fwd_dropped == 0))
843                 return;
844         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
845                "TX Port=%2d/Queue=%2d %s\n",
846                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
847                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
848         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
849                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
850
851         /* If running the checksum forwarding engine, show checksum errors. */
852         if (cur_fwd_eng == &csum_fwd_engine) {
853                 printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
854                        "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
855         }
856
857 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
858         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
859         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
860 #endif
861 }
862
863 static void
864 flush_fwd_rx_queues(void)
865 {
866         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
867         portid_t  rxp;
868         portid_t port_id;
869         queueid_t rxq;
870         uint16_t  nb_rx;
871         uint16_t  i;
872         uint8_t   j;
873
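        /*
         * Drain every RX queue of every forwarding port. Two passes are made
         * with a short pause in between, so that packets still in flight when
         * the first pass completes are also received and freed.
         */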
874         for (j = 0; j < 2; j++) {
875                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
876                         for (rxq = 0; rxq < nb_rxq; rxq++) {
877                                 port_id = fwd_ports_ids[rxp];
878                                 do {
879                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
880                                                 pkts_burst, MAX_PKT_BURST);
881                                         for (i = 0; i < nb_rx; i++)
882                                                 rte_pktmbuf_free(pkts_burst[i]);
883                                 } while (nb_rx > 0);
884                         }
885                 }
886                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
887         }
888 }
889
890 static void
891 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
892 {
893         struct fwd_stream **fsm;
894         streamid_t nb_fs;
895         streamid_t sm_id;
896
897         fsm = &fwd_streams[fc->stream_idx];
898         nb_fs = fc->stream_nb;
899         do {
900                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
901                         (*pkt_fwd)(fsm[sm_id]);
902         } while (! fc->stopped);
903 }
904
905 static int
906 start_pkt_forward_on_core(void *fwd_arg)
907 {
908         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
909                              cur_fwd_config.fwd_eng->packet_fwd);
910         return 0;
911 }
912
913 /*
914  * Run the TXONLY packet forwarding engine to send a single burst of packets.
915  * Used to start communication flows in network loopback test configurations.
916  */
917 static int
918 run_one_txonly_burst_on_core(void *fwd_arg)
919 {
920         struct fwd_lcore *fwd_lc;
921         struct fwd_lcore tmp_lcore;
922
923         fwd_lc = (struct fwd_lcore *) fwd_arg;
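        /*
         * Work on a copy of the lcore context and mark the copy as stopped,
         * so that run_pkt_fwd_on_lcore() exits its do/while loop after a
         * single pass over the streams (one burst) without modifying the
         * state of the real forwarding lcore.
         */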
924         tmp_lcore = *fwd_lc;
925         tmp_lcore.stopped = 1;
926         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
927         return 0;
928 }
929
930 /*
931  * Launch packet forwarding:
932  *     - Set up the per-port forwarding context.
933  *     - Launch the logical cores with their forwarding configuration.
934  */
935 static void
936 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
937 {
938         port_fwd_begin_t port_fwd_begin;
939         unsigned int i;
940         unsigned int lc_id;
941         int diag;
942
943         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
944         if (port_fwd_begin != NULL) {
945                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
946                         (*port_fwd_begin)(fwd_ports_ids[i]);
947         }
948         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
949                 lc_id = fwd_lcores_cpuids[i];
950                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
951                         fwd_lcores[i]->stopped = 0;
952                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
953                                                      fwd_lcores[i], lc_id);
954                         if (diag != 0)
955                                 printf("launch lcore %u failed - diag=%d\n",
956                                        lc_id, diag);
957                 }
958         }
959 }
960
961 /*
962  * Launch packet forwarding configuration.
963  */
964 void
965 start_packet_forwarding(int with_tx_first)
966 {
967         port_fwd_begin_t port_fwd_begin;
968         port_fwd_end_t  port_fwd_end;
969         struct rte_port *port;
970         unsigned int i;
971         portid_t   pt_id;
972         streamid_t sm_id;
973
974         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
975                 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
976
977         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
978                 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
979
980         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
981                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
982                 (!nb_rxq || !nb_txq))
983                 rte_exit(EXIT_FAILURE,
984                         "Either rxq or txq is 0, cannot use %s fwd mode\n",
985                         cur_fwd_eng->fwd_mode_name);
986
987         if (all_ports_started() == 0) {
988                 printf("Not all ports were started\n");
989                 return;
990         }
991         if (test_done == 0) {
992                 printf("Packet forwarding already started\n");
993                 return;
994         }
995         if(dcb_test) {
996                 for (i = 0; i < nb_fwd_ports; i++) {
997                         pt_id = fwd_ports_ids[i];
998                         port = &ports[pt_id];
999                         if (!port->dcb_flag) {
1000                                 printf("In DCB mode, all forwarding ports must "
1001                                        "be configured in this mode.\n");
1002                                 return;
1003                         }
1004                 }
1005                 if (nb_fwd_lcores == 1) {
1006                         printf("In DCB mode, the number of forwarding cores "
1007                                "should be larger than 1.\n");
1008                         return;
1009                 }
1010         }
1011         test_done = 0;
1012
1013         if(!no_flush_rx)
1014                 flush_fwd_rx_queues();
1015
1016         fwd_config_setup();
1017         rxtx_config_display();
1018
1019         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1020                 pt_id = fwd_ports_ids[i];
1021                 port = &ports[pt_id];
1022                 rte_eth_stats_get(pt_id, &port->stats);
1023                 port->tx_dropped = 0;
1024
1025                 map_port_queue_stats_mapping_registers(pt_id, port);
1026         }
1027         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1028                 fwd_streams[sm_id]->rx_packets = 0;
1029                 fwd_streams[sm_id]->tx_packets = 0;
1030                 fwd_streams[sm_id]->fwd_dropped = 0;
1031                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1032                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1033
1034 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1035                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1036                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1037                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1038                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1039 #endif
1040 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1041                 fwd_streams[sm_id]->core_cycles = 0;
1042 #endif
1043         }
1044         if (with_tx_first) {
1045                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1046                 if (port_fwd_begin != NULL) {
1047                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1048                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1049                 }
1050                 launch_packet_forwarding(run_one_txonly_burst_on_core);
1051                 rte_eal_mp_wait_lcore();
1052                 port_fwd_end = tx_only_engine.port_fwd_end;
1053                 if (port_fwd_end != NULL) {
1054                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1055                                 (*port_fwd_end)(fwd_ports_ids[i]);
1056                 }
1057         }
1058         launch_packet_forwarding(start_pkt_forward_on_core);
1059 }
1060
1061 void
1062 stop_packet_forwarding(void)
1063 {
1064         struct rte_eth_stats stats;
1065         struct rte_port *port;
1066         port_fwd_end_t  port_fwd_end;
1067         int i;
1068         portid_t   pt_id;
1069         streamid_t sm_id;
1070         lcoreid_t  lc_id;
1071         uint64_t total_recv;
1072         uint64_t total_xmit;
1073         uint64_t total_rx_dropped;
1074         uint64_t total_tx_dropped;
1075         uint64_t total_rx_nombuf;
1076         uint64_t tx_dropped;
1077         uint64_t rx_bad_ip_csum;
1078         uint64_t rx_bad_l4_csum;
1079 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1080         uint64_t fwd_cycles;
1081 #endif
1082         static const char *acc_stats_border = "+++++++++++++++";
1083
1084         if (all_ports_started() == 0) {
1085                 printf("Not all ports were started\n");
1086                 return;
1087         }
1088         if (test_done) {
1089                 printf("Packet forwarding not started\n");
1090                 return;
1091         }
1092         printf("Telling cores to stop...");
1093         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1094                 fwd_lcores[lc_id]->stopped = 1;
1095         printf("\nWaiting for lcores to finish...\n");
1096         rte_eal_mp_wait_lcore();
1097         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1098         if (port_fwd_end != NULL) {
1099                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1100                         pt_id = fwd_ports_ids[i];
1101                         (*port_fwd_end)(pt_id);
1102                 }
1103         }
1104 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1105         fwd_cycles = 0;
1106 #endif
1107         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1108                 if (cur_fwd_config.nb_fwd_streams >
1109                     cur_fwd_config.nb_fwd_ports) {
1110                         fwd_stream_stats_display(sm_id);
1111                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1112                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1113                 } else {
1114                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1115                                 fwd_streams[sm_id];
1116                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1117                                 fwd_streams[sm_id];
1118                 }
1119                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1120                 tx_dropped = (uint64_t) (tx_dropped +
1121                                          fwd_streams[sm_id]->fwd_dropped);
1122                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1123
1124                 rx_bad_ip_csum =
1125                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1126                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1127                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1128                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1129                                                         rx_bad_ip_csum;
1130
1131                 rx_bad_l4_csum =
1132                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1133                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1134                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1135                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1136                                                         rx_bad_l4_csum;
1137
1138 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1139                 fwd_cycles = (uint64_t) (fwd_cycles +
1140                                          fwd_streams[sm_id]->core_cycles);
1141 #endif
1142         }
1143         total_recv = 0;
1144         total_xmit = 0;
1145         total_rx_dropped = 0;
1146         total_tx_dropped = 0;
1147         total_rx_nombuf  = 0;
1148         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1149                 pt_id = fwd_ports_ids[i];
1150
1151                 port = &ports[pt_id];
1152                 rte_eth_stats_get(pt_id, &stats);
1153                 stats.ipackets -= port->stats.ipackets;
1154                 port->stats.ipackets = 0;
1155                 stats.opackets -= port->stats.opackets;
1156                 port->stats.opackets = 0;
1157                 stats.ibytes   -= port->stats.ibytes;
1158                 port->stats.ibytes = 0;
1159                 stats.obytes   -= port->stats.obytes;
1160                 port->stats.obytes = 0;
1161                 stats.imissed  -= port->stats.imissed;
1162                 port->stats.imissed = 0;
1163                 stats.oerrors  -= port->stats.oerrors;
1164                 port->stats.oerrors = 0;
1165                 stats.rx_nombuf -= port->stats.rx_nombuf;
1166                 port->stats.rx_nombuf = 0;
1167
1168                 total_recv += stats.ipackets;
1169                 total_xmit += stats.opackets;
1170                 total_rx_dropped += stats.imissed;
1171                 total_tx_dropped += port->tx_dropped;
1172                 total_rx_nombuf  += stats.rx_nombuf;
1173
1174                 fwd_port_stats_display(pt_id, &stats);
1175         }
1176         printf("\n  %s Accumulated forward statistics for all ports"
1177                "%s\n",
1178                acc_stats_border, acc_stats_border);
1179         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1180                "%-"PRIu64"\n"
1181                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1182                "%-"PRIu64"\n",
1183                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1184                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1185         if (total_rx_nombuf > 0)
1186                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1187         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1188                "%s\n",
1189                acc_stats_border, acc_stats_border);
1190 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1191         if (total_recv > 0)
1192                 printf("\n  CPU cycles/packet=%u (total cycles="
1193                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1194                        (unsigned int)(fwd_cycles / total_recv),
1195                        fwd_cycles, total_recv);
1196 #endif
1197         printf("\nDone.\n");
1198         test_done = 1;
1199 }
1200
1201 void
1202 dev_set_link_up(portid_t pid)
1203 {
1204         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1205                 printf("\nFailed to set the link up.\n");
1206 }
1207
1208 void
1209 dev_set_link_down(portid_t pid)
1210 {
1211         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1212                 printf("\nFailed to set the link down.\n");
1213 }
1214
1215 static int
1216 all_ports_started(void)
1217 {
1218         portid_t pi;
1219         struct rte_port *port;
1220
1221         FOREACH_PORT(pi, ports) {
1222                 port = &ports[pi];
1223                 /* Check if there is a port which is not started */
1224                 if ((port->port_status != RTE_PORT_STARTED) &&
1225                         (port->slave_flag == 0))
1226                         return 0;
1227         }
1228
1229         /* All ports are started. */
1230         return 1;
1231 }
1232
1233 int
1234 all_ports_stopped(void)
1235 {
1236         portid_t pi;
1237         struct rte_port *port;
1238
1239         FOREACH_PORT(pi, ports) {
1240                 port = &ports[pi];
1241                 if ((port->port_status != RTE_PORT_STOPPED) &&
1242                         (port->slave_flag == 0))
1243                         return 0;
1244         }
1245
1246         return 1;
1247 }
1248
1249 int
1250 port_is_started(portid_t port_id)
1251 {
1252         if (port_id_is_invalid(port_id, ENABLED_WARN))
1253                 return 0;
1254
1255         if (ports[port_id].port_status != RTE_PORT_STARTED)
1256                 return 0;
1257
1258         return 1;
1259 }
1260
1261 static int
1262 port_is_closed(portid_t port_id)
1263 {
1264         if (port_id_is_invalid(port_id, ENABLED_WARN))
1265                 return 0;
1266
1267         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1268                 return 0;
1269
1270         return 1;
1271 }
1272
1273 int
1274 start_port(portid_t pid)
1275 {
1276         int diag, need_check_link_status = -1;
1277         portid_t pi;
1278         queueid_t qi;
1279         struct rte_port *port;
1280         struct ether_addr mac_addr;
1281
1282         if (port_id_is_invalid(pid, ENABLED_WARN))
1283                 return 0;
1284
1285         if (init_fwd_streams() < 0) {
1286                 printf("Fail from init_fwd_streams()\n");
1287                 return -1;
1288         }
1289
1290         if(dcb_config)
1291                 dcb_test = 1;
1292         FOREACH_PORT(pi, ports) {
1293                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1294                         continue;
1295
1296                 need_check_link_status = 0;
1297                 port = &ports[pi];
1298                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1299                                                  RTE_PORT_HANDLING) == 0) {
1300                         printf("Port %d is not stopped\n", pi);
1301                         continue;
1302                 }
1303
1304                 if (port->need_reconfig > 0) {
1305                         port->need_reconfig = 0;
1306
1307                         printf("Configuring Port %d (socket %u)\n", pi,
1308                                         port->socket_id);
1309                         /* configure port */
1310                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1311                                                 &(port->dev_conf));
1312                         if (diag != 0) {
1313                                 if (rte_atomic16_cmpset(&(port->port_status),
1314                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1315                                         printf("Port %d can not be set back "
1316                                                         "to stopped\n", pi);
1317                                 printf("Fail to configure port %d\n", pi);
1318                                 /* try to reconfigure port next time */
1319                                 port->need_reconfig = 1;
1320                                 return -1;
1321                         }
1322                 }
1323                 if (port->need_reconfig_queues > 0) {
1324                         port->need_reconfig_queues = 0;
1325                         /* setup tx queues */
1326                         for (qi = 0; qi < nb_txq; qi++) {
1327                                 if ((numa_support) &&
1328                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1329                                         diag = rte_eth_tx_queue_setup(pi, qi,
1330                                                 nb_txd,txring_numa[pi],
1331                                                 &(port->tx_conf));
1332                                 else
1333                                         diag = rte_eth_tx_queue_setup(pi, qi,
1334                                                 nb_txd,port->socket_id,
1335                                                 &(port->tx_conf));
1336
1337                                 if (diag == 0)
1338                                         continue;
1339
1340                                 /* Fail to setup tx queue, return */
1341                                 if (rte_atomic16_cmpset(&(port->port_status),
1342                                                         RTE_PORT_HANDLING,
1343                                                         RTE_PORT_STOPPED) == 0)
1344                                         printf("Port %d can not be set back "
1345                                                         "to stopped\n", pi);
1346                                 printf("Fail to configure port %d tx queues\n", pi);
1347                                 /* try to reconfigure queues next time */
1348                                 port->need_reconfig_queues = 1;
1349                                 return -1;
1350                         }
1351                         /* setup rx queues */
1352                         for (qi = 0; qi < nb_rxq; qi++) {
1353                                 if ((numa_support) &&
1354                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1355                                         struct rte_mempool * mp =
1356                                                 mbuf_pool_find(rxring_numa[pi]);
1357                                         if (mp == NULL) {
1358                                                 printf("Failed to setup RX queue: "
1359                                                         "no mempool allocation "
1360                                                         "on socket %d\n",
1361                                                         rxring_numa[pi]);
1362                                                 return -1;
1363                                         }
1364
1365                                         diag = rte_eth_rx_queue_setup(pi, qi,
1366                                              nb_rxd,rxring_numa[pi],
1367                                              &(port->rx_conf),mp);
1368                                 }
1369                                 else
1370                                         diag = rte_eth_rx_queue_setup(pi, qi,
1371                                              nb_rxd,port->socket_id,
1372                                              &(port->rx_conf),
1373                                              mbuf_pool_find(port->socket_id));
1374
1375                                 if (diag == 0)
1376                                         continue;
1377
1378
1379                                 /* Fail to setup rx queue, return */
1380                                 if (rte_atomic16_cmpset(&(port->port_status),
1381                                                         RTE_PORT_HANDLING,
1382                                                         RTE_PORT_STOPPED) == 0)
1383                                         printf("Port %d can not be set back "
1384                                                         "to stopped\n", pi);
1385                                 printf("Fail to configure port %d rx queues\n", pi);
1386                                 /* try to reconfigure queues next time */
1387                                 port->need_reconfig_queues = 1;
1388                                 return -1;
1389                         }
1390                 }
1391                 /* start port */
1392                 if (rte_eth_dev_start(pi) < 0) {
1393                         printf("Fail to start port %d\n", pi);
1394
1395                         /* Failed to start the port; restore the status and skip it. */
1396                         if (rte_atomic16_cmpset(&(port->port_status),
1397                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1398                                 printf("Port %d can not be set back to "
1399                                                         "stopped\n", pi);
1400                         continue;
1401                 }
1402
1403                 if (rte_atomic16_cmpset(&(port->port_status),
1404                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1405                         printf("Port %d cannot be set to started\n", pi);
1406
1407                 rte_eth_macaddr_get(pi, &mac_addr);
1408                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1409                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1410                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1411                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1412
1413                 /* at least one port started, need checking link status */
1414                 need_check_link_status = 1;
1415         }
1416
1417         if (need_check_link_status == 1 && !no_link_check)
1418                 check_all_ports_link_status(RTE_PORT_ALL);
1419         else if (need_check_link_status == 0)
1420                 printf("Please stop the ports first\n");
1421
1422         printf("Done\n");
1423         return 0;
1424 }
1425
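/*
 * Stop the given port (or every port when pid is RTE_PORT_ALL): skip ports
 * that are still part of the forwarding configuration, move each remaining
 * port from STARTED to HANDLING, call rte_eth_dev_stop() and mark it STOPPED.
 */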
1426 void
1427 stop_port(portid_t pid)
1428 {
1429         portid_t pi;
1430         struct rte_port *port;
1431         int need_check_link_status = 0;
1432
1433         if (dcb_test) {
1434                 dcb_test = 0;
1435                 dcb_config = 0;
1436         }
1437
1438         if (port_id_is_invalid(pid, ENABLED_WARN))
1439                 return;
1440
1441         printf("Stopping ports...\n");
1442
1443         FOREACH_PORT(pi, ports) {
1444                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1445                         continue;
1446
1447                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1448                         printf("Please remove port %d from forwarding configuration.\n", pi);
1449                         continue;
1450                 }
1451
1452                 port = &ports[pi];
1453                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1454                                                 RTE_PORT_HANDLING) == 0)
1455                         continue;
1456
1457                 rte_eth_dev_stop(pi);
1458
1459                 if (rte_atomic16_cmpset(&(port->port_status),
1460                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1461                         printf("Port %d cannot be set to stopped\n", pi);
1462                 need_check_link_status = 1;
1463         }
1464         if (need_check_link_status && !no_link_check)
1465                 check_all_ports_link_status(RTE_PORT_ALL);
1466
1467         printf("Done\n");
1468 }
1469
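/*
 * Close the given port (or every port when pid is RTE_PORT_ALL): the port
 * must already be stopped; rte_eth_dev_close() releases its resources and
 * the port status is then set to CLOSED.
 */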
1470 void
1471 close_port(portid_t pid)
1472 {
1473         portid_t pi;
1474         struct rte_port *port;
1475
1476         if (port_id_is_invalid(pid, ENABLED_WARN))
1477                 return;
1478
1479         printf("Closing ports...\n");
1480
1481         FOREACH_PORT(pi, ports) {
1482                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1483                         continue;
1484
1485                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1486                         printf("Please remove port %d from forwarding configuration.\n", pi);
1487                         continue;
1488                 }
1489
1490                 port = &ports[pi];
1491                 if (rte_atomic16_cmpset(&(port->port_status),
1492                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1493                         printf("Port %d is already closed\n", pi);
1494                         continue;
1495                 }
1496
1497                 if (rte_atomic16_cmpset(&(port->port_status),
1498                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1499                         printf("Port %d is not stopped\n", pi);
1500                         continue;
1501                 }
1502
1503                 rte_eth_dev_close(pi);
1504
1505                 if (rte_atomic16_cmpset(&(port->port_status),
1506                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1507                         printf("Port %d cannot be set to closed\n", pi);
1508         }
1509
1510         printf("Done\n");
1511 }
1512
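/*
 * Hot-plug a new port described by the device identifier (e.g. a PCI address
 * or virtual device name), reconfigure it on its NUMA socket, enable
 * promiscuous mode and leave it in the STOPPED state.
 */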
1513 void
1514 attach_port(char *identifier)
1515 {
1516         portid_t pi = 0;
1517
1518         printf("Attaching a new port...\n");
1519
1520         if (identifier == NULL) {
1521                 printf("Invalid parameters are specified\n");
1522                 return;
1523         }
1524
1525         if (rte_eth_dev_attach(identifier, &pi))
1526                 return;
1527
1528         ports[pi].enabled = 1;
1529         reconfig(pi, rte_eth_dev_socket_id(pi));
1530         rte_eth_promiscuous_enable(pi);
1531
1532         nb_ports = rte_eth_dev_count();
1533
1534         ports[pi].port_status = RTE_PORT_STOPPED;
1535
1536         printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1537         printf("Done\n");
1538 }
1539
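/* Hot-unplug a port: it must be closed first, then rte_eth_dev_detach()
 * removes the underlying device and the port is disabled in testpmd.
 */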
1540 void
1541 detach_port(uint8_t port_id)
1542 {
1543         char name[RTE_ETH_NAME_MAX_LEN];
1544
1545         printf("Detaching a port...\n");
1546
1547         if (!port_is_closed(port_id)) {
1548                 printf("Please close port first\n");
1549                 return;
1550         }
1551
1552         if (rte_eth_dev_detach(port_id, name))
1553                 return;
1554
1555         ports[port_id].enabled = 0;
1556         nb_ports = rte_eth_dev_count();
1557
1558         printf("Port '%s' is detached. Total number of ports is now %d\n",
1559                         name, nb_ports);
1560         printf("Done\n");
1561         return;
1562 }
1563
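/* Called on exit: stop packet forwarding if it is still running, then stop
 * and close every port without waiting for link status checks.
 */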
1564 void
1565 pmd_test_exit(void)
1566 {
1567         portid_t pt_id;
1568
1569         if (test_done == 0)
1570                 stop_packet_forwarding();
1571
1572         if (ports != NULL) {
1573                 no_link_check = 1;
1574                 FOREACH_PORT(pt_id, ports) {
1575                         printf("\nShutting down port %d...\n", pt_id);
1576                         fflush(stdout);
1577                         stop_port(pt_id);
1578                         close_port(pt_id);
1579                 }
1580         }
1581         printf("\nBye...\n");
1582 }
1583
1584 typedef void (*cmd_func_t)(void);
1585 struct pmd_test_command {
1586         const char *cmd_name;
1587         cmd_func_t cmd_func;
1588 };
1589
1590 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1591
1592 /* Check the link status of all ports in up to 9 seconds, and print the final status */
1593 static void
1594 check_all_ports_link_status(uint32_t port_mask)
1595 {
1596 #define CHECK_INTERVAL 100 /* 100ms */
1597 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1598         uint8_t portid, count, all_ports_up, print_flag = 0;
1599         struct rte_eth_link link;
1600
1601         printf("Checking link statuses...\n");
1602         fflush(stdout);
1603         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1604                 all_ports_up = 1;
1605                 FOREACH_PORT(portid, ports) {
1606                         if ((port_mask & (1 << portid)) == 0)
1607                                 continue;
1608                         memset(&link, 0, sizeof(link));
1609                         rte_eth_link_get_nowait(portid, &link);
1610                         /* print link status if flag set */
1611                         if (print_flag == 1) {
1612                                 if (link.link_status)
1613                                         printf("Port %d Link Up - speed %u "
1614                                                 "Mbps - %s\n", (uint8_t)portid,
1615                                                 (unsigned)link.link_speed,
1616                                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1617                                                 ("full-duplex") : ("half-duplex"));
1618                                 else
1619                                         printf("Port %d Link Down\n",
1620                                                 (uint8_t)portid);
1621                                 continue;
1622                         }
1623                         /* clear all_ports_up flag if any link down */
1624                         if (link.link_status == ETH_LINK_DOWN) {
1625                                 all_ports_up = 0;
1626                                 break;
1627                         }
1628                 }
1629                 /* after finally printing all link status, get out */
1630                 if (print_flag == 1)
1631                         break;
1632
1633                 if (all_ports_up == 0) {
1634                         fflush(stdout);
1635                         rte_delay_ms(CHECK_INTERVAL);
1636                 }
1637
1638                 /* set the print_flag if all ports up or timeout */
1639                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1640                         print_flag = 1;
1641                 }
1642         }
1643 }
1644
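/* Program the TX queue to statistics counter mappings requested on the
 * command line for this port; returns a negative value on driver error.
 */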
1645 static int
1646 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1647 {
1648         uint16_t i;
1649         int diag;
1650         uint8_t mapping_found = 0;
1651
1652         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1653                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1654                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1655                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1656                                         tx_queue_stats_mappings[i].queue_id,
1657                                         tx_queue_stats_mappings[i].stats_counter_id);
1658                         if (diag != 0)
1659                                 return diag;
1660                         mapping_found = 1;
1661                 }
1662         }
1663         if (mapping_found)
1664                 port->tx_queue_stats_mapping_enabled = 1;
1665         return 0;
1666 }
1667
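/* Same as above, for the RX queue to statistics counter mappings. */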
1668 static int
1669 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1670 {
1671         uint16_t i;
1672         int diag;
1673         uint8_t mapping_found = 0;
1674
1675         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1676                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1677                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1678                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1679                                         rx_queue_stats_mappings[i].queue_id,
1680                                         rx_queue_stats_mappings[i].stats_counter_id);
1681                         if (diag != 0)
1682                                 return diag;
1683                         mapping_found = 1;
1684                 }
1685         }
1686         if (mapping_found)
1687                 port->rx_queue_stats_mapping_enabled = 1;
1688         return 0;
1689 }
1690
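/* Apply both TX and RX queue stats mappings for a port; -ENOTSUP from the
 * driver only disables the feature, any other error is fatal.
 */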
1691 static void
1692 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1693 {
1694         int diag = 0;
1695
1696         diag = set_tx_queue_stats_mapping_registers(pi, port);
1697         if (diag != 0) {
1698                 if (diag == -ENOTSUP) {
1699                         port->tx_queue_stats_mapping_enabled = 0;
1700                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1701                 }
1702                 else
1703                         rte_exit(EXIT_FAILURE,
1704                                         "set_tx_queue_stats_mapping_registers "
1705                                         "failed for port id=%d diag=%d\n",
1706                                         pi, diag);
1707         }
1708
1709         diag = set_rx_queue_stats_mapping_registers(pi, port);
1710         if (diag != 0) {
1711                 if (diag == -ENOTSUP) {
1712                         port->rx_queue_stats_mapping_enabled = 0;
1713                         printf("RX queue stats mapping not supported port id=%d\n", pi);
1714                 }
1715                 else
1716                         rte_exit(EXIT_FAILURE,
1717                                         "set_rx_queue_stats_mapping_registers "
1718                                         "failed for port id=%d diag=%d\n",
1719                                         pi, diag);
1720         }
1721 }
1722
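/* Start from the driver defaults and override the RX/TX ring thresholds and
 * flags with any values given on the command line (RTE_PMD_PARAM_UNSET means
 * "keep the default").
 */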
1723 static void
1724 rxtx_port_config(struct rte_port *port)
1725 {
1726         port->rx_conf = port->dev_info.default_rxconf;
1727         port->tx_conf = port->dev_info.default_txconf;
1728
1729         /* Check if any RX/TX parameters have been passed */
1730         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1731                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1732
1733         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1734                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1735
1736         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1737                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1738
1739         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1740                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1741
1742         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1743                 port->rx_conf.rx_drop_en = rx_drop_en;
1744
1745         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1746                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1747
1748         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1749                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1750
1751         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1752                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1753
1754         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1755                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1756
1757         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1758                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1759
1760         if (txq_flags != RTE_PMD_PARAM_UNSET)
1761                 port->tx_conf.txq_flags = txq_flags;
1762 }
1763
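/* Build the default rte_eth_conf for every enabled port: RX mode, flow
 * director, RSS (only when several RX queues are used), VMDq RSS when VFs
 * are present, plus the per-queue settings from rxtx_port_config().
 */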
1764 void
1765 init_port_config(void)
1766 {
1767         portid_t pid;
1768         struct rte_port *port;
1769
1770         FOREACH_PORT(pid, ports) {
1771                 port = &ports[pid];
1772                 port->dev_conf.rxmode = rx_mode;
1773                 port->dev_conf.fdir_conf = fdir_conf;
1774                 if (nb_rxq > 1) {
1775                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1776                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1777                 } else {
1778                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1779                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1780                 }
1781
1782                 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1783                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1784                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1785                         else
1786                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1787                 }
1788
1789                 if (port->dev_info.max_vfs != 0) {
1790                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1791                                 port->dev_conf.rxmode.mq_mode =
1792                                         ETH_MQ_RX_VMDQ_RSS;
1793                         else
1794                                 port->dev_conf.rxmode.mq_mode =
1795                                         ETH_MQ_RX_NONE;
1796
1797                         port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1798                 }
1799
1800                 rxtx_port_config(port);
1801
1802                 rte_eth_macaddr_get(pid, &port->eth_addr);
1803
1804                 map_port_queue_stats_mapping_registers(pid, port);
1805 #ifdef RTE_NIC_BYPASS
1806                 rte_eth_dev_bypass_init(pid);
1807 #endif
1808         }
1809 }
1810
1811 void set_port_slave_flag(portid_t slave_pid)
1812 {
1813         struct rte_port *port;
1814
1815         port = &ports[slave_pid];
1816         port->slave_flag = 1;
1817 }
1818
1819 void clear_port_slave_flag(portid_t slave_pid)
1820 {
1821         struct rte_port *port;
1822
1823         port = &ports[slave_pid];
1824         port->slave_flag = 0;
1825 }
1826
1827 const uint16_t vlan_tags[] = {
1828                 0,  1,  2,  3,  4,  5,  6,  7,
1829                 8,  9, 10, 11,  12, 13, 14, 15,
1830                 16, 17, 18, 19, 20, 21, 22, 23,
1831                 24, 25, 26, 27, 28, 29, 30, 31
1832 };
1833
1834 static int
1835 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1836                  enum dcb_mode_enable dcb_mode,
1837                  enum rte_eth_nb_tcs num_tcs,
1838                  uint8_t pfc_en)
1839 {
1840         uint8_t i;
1841
1842         /*
1843          * Builds up the correct configuration for dcb+vt based on the vlan tags array
1844          * given above, and the number of traffic classes available for use.
1845          */
1846         if (dcb_mode == DCB_VT_ENABLED) {
1847                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1848                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
1849                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1850                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1851
1852                 /* VMDQ+DCB RX and TX configurations */
1853                 vmdq_rx_conf->enable_default_pool = 0;
1854                 vmdq_rx_conf->default_pool = 0;
1855                 vmdq_rx_conf->nb_queue_pools =
1856                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1857                 vmdq_tx_conf->nb_queue_pools =
1858                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1859
1860                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1861                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1862                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1863                         vmdq_rx_conf->pool_map[i].pools =
1864                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
1865                 }
1866                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1867                         vmdq_rx_conf->dcb_tc[i] = i;
1868                         vmdq_tx_conf->dcb_tc[i] = i;
1869                 }
1870
1871                 /* set DCB mode of RX and TX of multiple queues */
1872                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1873                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1874         } else {
1875                 struct rte_eth_dcb_rx_conf *rx_conf =
1876                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
1877                 struct rte_eth_dcb_tx_conf *tx_conf =
1878                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
1879
1880                 rx_conf->nb_tcs = num_tcs;
1881                 tx_conf->nb_tcs = num_tcs;
1882
1883                 for (i = 0; i < num_tcs; i++) {
1884                         rx_conf->dcb_tc[i] = i;
1885                         tx_conf->dcb_tc[i] = i;
1886                 }
1887                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1888                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1889                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1890         }
1891
1892         if (pfc_en)
1893                 eth_conf->dcb_capability_en =
1894                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1895         else
1896                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1897
1898         return 0;
1899 }
1900
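/* Reconfigure a port for DCB testing: derive the number of RX/TX queues from
 * the device capabilities, build the DCB (or DCB+VT) configuration and enable
 * HW VLAN filtering for the VLAN tags used by the pool mapping.
 */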
1901 int
1902 init_port_dcb_config(portid_t pid,
1903                      enum dcb_mode_enable dcb_mode,
1904                      enum rte_eth_nb_tcs num_tcs,
1905                      uint8_t pfc_en)
1906 {
1907         struct rte_eth_conf port_conf;
1908         struct rte_eth_dev_info dev_info;
1909         struct rte_port *rte_port;
1910         int retval;
1911         uint16_t i;
1912
1913         rte_eth_dev_info_get(pid, &dev_info);
1914
1915         /* If dev_info.vmdq_pool_base is greater than 0,
1916          * the queue ids of the VMDq pools start after the PF queues.
1917          */
1918         if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
1919                 printf("VMDQ_DCB multi-queue mode is nonsensical"
1920                         " for port %d.\n", pid);
1921                 return -1;
1922         }
1923
1924         /* Assume all ports in testpmd have the same DCB capability
1925          * and the same number of rxq and txq in DCB mode
1926          */
1927         if (dcb_mode == DCB_VT_ENABLED) {
1928                 nb_rxq = dev_info.max_rx_queues;
1929                 nb_txq = dev_info.max_tx_queues;
1930         } else {
1931                 /* If VT is disabled, use all PF queues */
1932                 if (dev_info.vmdq_pool_base == 0) {
1933                         nb_rxq = dev_info.max_rx_queues;
1934                         nb_txq = dev_info.max_tx_queues;
1935                 } else {
1936                         nb_rxq = (queueid_t)num_tcs;
1937                         nb_txq = (queueid_t)num_tcs;
1938
1939                 }
1940         }
1941         rx_free_thresh = 64;
1942
1943         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1944         /* Enter DCB configuration status */
1945         dcb_config = 1;
1946
1947         /* Set configuration of DCB in VT mode and DCB in non-VT mode */
1948         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
1949         if (retval < 0)
1950                 return retval;
1951
1952         rte_port = &ports[pid];
1953         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1954
1955         rxtx_port_config(rte_port);
1956         /* VLAN filter */
1957         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1958         for (i = 0; i < RTE_DIM(vlan_tags); i++)
1959                 rx_vft_set(pid, vlan_tags[i], 1);
1960
1961         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1962         map_port_queue_stats_mapping_registers(pid, rte_port);
1963
1964         rte_port->dcb_flag = 1;
1965
1966         return 0;
1967 }
1968
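/* Allocate the array of rte_port structures for all possible ports and mark
 * the ports probed by the EAL as enabled.
 */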
1969 static void
1970 init_port(void)
1971 {
1972         portid_t pid;
1973
1974         /* Configuration of Ethernet ports. */
1975         ports = rte_zmalloc("testpmd: ports",
1976                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
1977                             RTE_CACHE_LINE_SIZE);
1978         if (ports == NULL) {
1979                 rte_exit(EXIT_FAILURE,
1980                                 "rte_zmalloc(%d struct rte_port) failed\n",
1981                                 RTE_MAX_ETHPORTS);
1982         }
1983
1984         /* enable the allocated ports */
1985         for (pid = 0; pid < nb_ports; pid++)
1986                 ports[pid].enabled = 1;
1987 }
1988
1989 static void
1990 force_quit(void)
1991 {
1992         pmd_test_exit();
1993         prompt_exit();
1994 }
1995
1996 static void
1997 signal_handler(int signum)
1998 {
1999         if (signum == SIGINT || signum == SIGTERM) {
2000                 printf("\nSignal %d received, preparing to exit...\n",
2001                                 signum);
2002                 force_quit();
2003                 /* exit with the expected status */
2004                 signal(signum, SIG_DFL);
2005                 kill(getpid(), signum);
2006         }
2007 }
2008
2009 int
2010 main(int argc, char** argv)
2011 {
2012         int  diag;
2013         uint8_t port_id;
2014
2015         signal(SIGINT, signal_handler);
2016         signal(SIGTERM, signal_handler);
2017
2018         diag = rte_eal_init(argc, argv);
2019         if (diag < 0)
2020                 rte_panic("Cannot init EAL\n");
2021
2022         nb_ports = (portid_t) rte_eth_dev_count();
2023         if (nb_ports == 0)
2024                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2025
2026         /* allocate port structures, and init them */
2027         init_port();
2028
2029         set_def_fwd_config();
2030         if (nb_lcores == 0)
2031                 rte_panic("Empty set of forwarding logical cores - check the "
2032                           "core mask supplied in the command parameters\n");
2033
2034         argc -= diag;
2035         argv += diag;
2036         if (argc > 1)
2037                 launch_args_parse(argc, argv);
2038
2039         if (!nb_rxq && !nb_txq)
2040                 printf("Warning: Either rx or tx queues should be non-zero\n");
2041
2042         if (nb_rxq > 1 && nb_rxq > nb_txq)
2043                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2044                        "but nb_txq=%d will prevent fully testing it.\n",
2045                        nb_rxq, nb_txq);
2046
2047         init_config();
2048         if (start_port(RTE_PORT_ALL) != 0)
2049                 rte_exit(EXIT_FAILURE, "Failed to start ports\n");
2050
2051         /* set all ports to promiscuous mode by default */
2052         FOREACH_PORT(port_id, ports)
2053                 rte_eth_promiscuous_enable(port_id);
2054
2055 #ifdef RTE_LIBRTE_CMDLINE
2056         if (interactive == 1) {
2057                 if (auto_start) {
2058                         printf("Start automatic packet forwarding\n");
2059                         start_packet_forwarding(0);
2060                 }
2061                 prompt();
2062         } else
2063 #endif
2064         {
2065                 char c;
2066                 int rc;
2067
2068                 printf("No command-line core given, starting packet forwarding\n");
2069                 start_packet_forwarding(0);
2070                 printf("Press enter to exit\n");
2071                 rc = read(0, &c, 1);
2072                 pmd_test_exit();
2073                 if (rc < 0)
2074                         return 1;
2075         }
2076
2077         return 0;
2078 }