app/testpmd: fix numa socket detection
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_eal.h>
61 #include <rte_per_lcore.h>
62 #include <rte_lcore.h>
63 #include <rte_atomic.h>
64 #include <rte_branch_prediction.h>
65 #include <rte_ring.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
68 #include <rte_mbuf.h>
69 #include <rte_interrupts.h>
70 #include <rte_pci.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
73 #include <rte_dev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81
82 uint16_t verbose_level = 0; /**< Silent by default. */
83
84 /* use master core for command line ? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
87
88 /*
89  * NUMA support configuration.
90  * When set, the NUMA support attempts to dispatch the allocation of the
91  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92  * probed ports among the detected CPU sockets.
93  * Otherwise, all memory is allocated from CPU socket 0.
94  */
95 uint8_t numa_support = 0; /**< No numa support by default */
96
97 /*
98  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99  * not configured.
100  */
101 uint8_t socket_num = UMA_NO_CONFIG;
102
103 /*
104  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
105  */
106 uint8_t mp_anon = 0;
107
108 /*
109  * Record the Ethernet address of peer target ports to which packets are
110  * forwarded.
111  *   Must be instantiated with the Ethernet addresses of peer traffic generator
112  * ports.
113  */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
116
117 /*
118  * Probed Target Environment.
119  */
120 struct rte_port *ports;        /**< For all probed ethernet ports. */
121 portid_t nb_ports;             /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124
125 /*
126  * Test Forwarding Configuration.
127  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129  */
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
134
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
140
141 /*
142  * Forwarding engines.
143  */
144 struct fwd_engine * fwd_engines[] = {
145         &io_fwd_engine,
146         &mac_fwd_engine,
147         &mac_retry_fwd_engine,
148         &mac_swap_engine,
149         &flow_gen_engine,
150         &rx_only_engine,
151         &tx_only_engine,
152         &csum_fwd_engine,
153         &icmp_echo_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155         &ieee1588_fwd_engine,
156 #endif
157         NULL,
158 };
159
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
162
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
165                                       * specified on command-line. */
166
167 /*
168  * Configuration of packet segments used by the "txonly" processing engine.
169  */
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172         TXONLY_DEF_PACKET_LEN,
173 };
174 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
175
176 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
177 /**< Split policy for packets to TX. */
178
179 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
180 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
181
182 /* Whether the current configuration is in DCB mode; 0 means it is not */
183 uint8_t dcb_config = 0;
184
185 /* Whether DCB is in testing status */
186 uint8_t dcb_test = 0;
187
188 /*
189  * Configurable number of RX/TX queues.
190  */
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193
194 /*
195  * Configurable number of RX/TX ring descriptors.
196  */
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
201
202 #define RTE_PMD_PARAM_UNSET -1
203 /*
204  * Configurable values of RX and TX ring threshold registers.
205  */
206
207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
210
211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
214
215 /*
216  * Configurable value of RX free threshold.
217  */
218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
219
220 /*
221  * Configurable value of RX drop enable.
222  */
223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
224
225 /*
226  * Configurable value of TX free threshold.
227  */
228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
229
230 /*
231  * Configurable value of TX RS bit threshold.
232  */
233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
234
235 /*
236  * Configurable value of TX queue flags.
237  */
238 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
239
240 /*
241  * Receive Side Scaling (RSS) configuration.
242  */
243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
244
245 /*
246  * Port topology configuration
247  */
248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
249
250 /*
251  * Avoid flushing all the RX streams before starting forwarding.
252  */
253 uint8_t no_flush_rx = 0; /* flush by default */
254
255 /*
256  * Avoid checking the link status when starting/stopping a port.
257  */
258 uint8_t no_link_check = 0; /* check by default */
259
260 /*
261  * NIC bypass mode configuration options.
262  */
263 #ifdef RTE_NIC_BYPASS
264
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
267
268 #endif
269
270 /*
271  * Ethernet device configuration.
272  */
273 struct rte_eth_rxmode rx_mode = {
274         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
275         .split_hdr_size = 0,
276         .header_split   = 0, /**< Header Split disabled. */
277         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
280         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
282         .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
283 };
284
285 struct rte_fdir_conf fdir_conf = {
286         .mode = RTE_FDIR_MODE_NONE,
287         .pballoc = RTE_FDIR_PBALLOC_64K,
288         .status = RTE_FDIR_REPORT_STATUS,
289         .mask = {
290                 .vlan_tci_mask = 0x0,
291                 .ipv4_mask     = {
292                         .src_ip = 0xFFFFFFFF,
293                         .dst_ip = 0xFFFFFFFF,
294                 },
295                 .ipv6_mask     = {
296                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
297                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
298                 },
299                 .src_port_mask = 0xFFFF,
300                 .dst_port_mask = 0xFFFF,
301                 .mac_addr_byte_mask = 0xFF,
302                 .tunnel_type_mask = 1,
303                 .tunnel_id_mask = 0xFFFFFFFF,
304         },
305         .drop_queue = 127,
306 };
307
308 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
309
310 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
311 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
312
313 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
314 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
315
316 uint16_t nb_tx_queue_stats_mappings = 0;
317 uint16_t nb_rx_queue_stats_mappings = 0;
318
319 unsigned max_socket = 0;
320
321 /* Forward function declarations */
322 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
323 static void check_all_ports_link_status(uint32_t port_mask);
324
325 /*
326  * Check if all the ports are started.
327  * If yes, return positive value. If not, return zero.
328  */
329 static int all_ports_started(void);
330
331 /*
332  * Find next enabled port
333  */
334 portid_t
335 find_next_port(portid_t p, struct rte_port *ports, int size)
336 {
337         if (ports == NULL)
338                 rte_exit(-EINVAL, "failed to find a next port id\n");
339
340         while ((p < size) && (ports[p].enabled == 0))
341                 p++;
342         return p;
343 }
344
345 /*
346  * Setup default configuration.
347  */
348 static void
349 set_default_fwd_lcores_config(void)
350 {
351         unsigned int i;
352         unsigned int nb_lc;
353         unsigned int sock_num;
354
355         nb_lc = 0;
356         for (i = 0; i < RTE_MAX_LCORE; i++) {
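                    /*
                     * rte_lcore_to_socket_id() returns a socket id; adding 1
                     * turns it into the socket count needed to cover that id,
                     * which is what max_socket tracks.
                     */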
357                 sock_num = rte_lcore_to_socket_id(i) + 1;
358                 if (sock_num > max_socket) {
359                         if (sock_num > RTE_MAX_NUMA_NODES)
360                                 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
361                         max_socket = sock_num;
362                 }
363                 if (!rte_lcore_is_enabled(i))
364                         continue;
365                 if (i == rte_get_master_lcore())
366                         continue;
367                 fwd_lcores_cpuids[nb_lc++] = i;
368         }
369         nb_lcores = (lcoreid_t) nb_lc;
370         nb_cfg_lcores = nb_lcores;
371         nb_fwd_lcores = 1;
372 }
373
374 static void
375 set_def_peer_eth_addrs(void)
376 {
377         portid_t i;
378
379         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
380                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
381                 peer_eth_addrs[i].addr_bytes[5] = i;
382         }
383 }
384
385 static void
386 set_default_fwd_ports_config(void)
387 {
388         portid_t pt_id;
389
390         for (pt_id = 0; pt_id < nb_ports; pt_id++)
391                 fwd_ports_ids[pt_id] = pt_id;
392
393         nb_cfg_ports = nb_ports;
394         nb_fwd_ports = nb_ports;
395 }
396
397 void
398 set_def_fwd_config(void)
399 {
400         set_default_fwd_lcores_config();
401         set_def_peer_eth_addrs();
402         set_default_fwd_ports_config();
403 }
404
405 /*
406  * Configuration initialisation done once at init time.
407  */
408 static void
409 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
410                  unsigned int socket_id)
411 {
412         char pool_name[RTE_MEMPOOL_NAMESIZE];
413         struct rte_mempool *rte_mp;
414         uint32_t mb_size;
415
416         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
417         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
418
419 #ifdef RTE_LIBRTE_PMD_XENVIRT
420         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
421                 (unsigned) mb_mempool_cache,
422                 sizeof(struct rte_pktmbuf_pool_private),
423                 rte_pktmbuf_pool_init, NULL,
424                 rte_pktmbuf_init, NULL,
425                 socket_id, 0);
426
427
428
429 #else
430         if (mp_anon != 0)
431                 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
432                                     (unsigned) mb_mempool_cache,
433                                     sizeof(struct rte_pktmbuf_pool_private),
434                                     rte_pktmbuf_pool_init, NULL,
435                                     rte_pktmbuf_init, NULL,
436                                     socket_id, 0);
437         else
438                 /* wrapper to rte_mempool_create() */
439                 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
440                         mb_mempool_cache, 0, mbuf_seg_size, socket_id);
441
442 #endif
443
444         if (rte_mp == NULL) {
445                 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
446                                                 "failed\n", socket_id);
447         } else if (verbose_level > 0) {
448                 rte_mempool_dump(stdout, rte_mp);
449         }
450 }
451
452 /*
453  * Check whether the given socket id is valid in NUMA mode.
454  * If valid, return 0; otherwise return -1.
455  */
456 static int
457 check_socket_id(const unsigned int socket_id)
458 {
459         static int warning_once = 0;
460
461         if (socket_id >= max_socket) {
462                 if (!warning_once && numa_support)
463                         printf("Warning: NUMA should be configured manually by"
464                                " using --port-numa-config and"
465                                " --ring-numa-config parameters along with"
466                                " --numa.\n");
467                 warning_once = 1;
468                 return -1;
469         }
470         return 0;
471 }
472
473 static void
474 init_config(void)
475 {
476         portid_t pid;
477         struct rte_port *port;
478         struct rte_mempool *mbp;
479         unsigned int nb_mbuf_per_pool;
480         lcoreid_t  lc_id;
481         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
482
483         memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
484         /* Configuration of logical cores. */
485         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
486                                 sizeof(struct fwd_lcore *) * nb_lcores,
487                                 RTE_CACHE_LINE_SIZE);
488         if (fwd_lcores == NULL) {
489                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
490                                                         "failed\n", nb_lcores);
491         }
492         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
493                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
494                                                sizeof(struct fwd_lcore),
495                                                RTE_CACHE_LINE_SIZE);
496                 if (fwd_lcores[lc_id] == NULL) {
497                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
498                                                                 "failed\n");
499                 }
500                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
501         }
502
503         /*
504          * Create mbuf pools.
505          * If NUMA support is disabled, create a single mbuf pool in
506          * socket 0 memory by default.
507          * Otherwise, create an mbuf pool in the memory of each detected socket.
508          *
509          * Use the maximum value of nb_rxd and nb_txd here, so that nb_rxd and
510          * nb_txd can be reconfigured at run time.
511          */
512         if (param_total_num_mbufs)
513                 nb_mbuf_per_pool = param_total_num_mbufs;
514         else {
515                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
516                                 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
517
518                 if (!numa_support)
519                         nb_mbuf_per_pool =
520                                 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
521         }
522
523         if (!numa_support) {
524                 if (socket_num == UMA_NO_CONFIG)
525                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
526                 else
527                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
528                                                  socket_num);
529         }
530
531         FOREACH_PORT(pid, ports) {
532                 port = &ports[pid];
533                 rte_eth_dev_info_get(pid, &port->dev_info);
534
535                 if (numa_support) {
536                         if (port_numa[pid] != NUMA_NO_CONFIG)
537                                 port_per_socket[port_numa[pid]]++;
538                         else {
539                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
540
541                                 /* if socket_id is invalid, set to 0 */
542                                 if (check_socket_id(socket_id) < 0)
543                                         socket_id = 0;
544                                 port_per_socket[socket_id]++;
545                         }
546                 }
547
548                 /* set flag to initialize port/queue */
549                 port->need_reconfig = 1;
550                 port->need_reconfig_queues = 1;
551         }
552
553         if (numa_support) {
554                 uint8_t i;
555                 unsigned int nb_mbuf;
556
557                 if (param_total_num_mbufs)
558                         nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
559
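                    /* create one mbuf pool on each detected socket */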
560                 for (i = 0; i < max_socket; i++) {
561                         nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
562                         if (nb_mbuf)
563                                 mbuf_pool_create(mbuf_data_size,
564                                                 nb_mbuf, i);
565                 }
566         }
567         init_port_config();
568
569         /*
570          * Record which mbuf pool each logical core should use, if needed.
571          */
572         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
573                 mbp = mbuf_pool_find(
574                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
575
576                 if (mbp == NULL)
577                         mbp = mbuf_pool_find(0);
578                 fwd_lcores[lc_id]->mbp = mbp;
579         }
580
581         /* Configuration of packet forwarding streams. */
582         if (init_fwd_streams() < 0)
583                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
584 }
585
586
587 void
588 reconfig(portid_t new_port_id, unsigned socket_id)
589 {
590         struct rte_port *port;
591
592         /* Reconfiguration of Ethernet ports. */
593         port = &ports[new_port_id];
594         rte_eth_dev_info_get(new_port_id, &port->dev_info);
595
596         /* set flag to initialize port/queue */
597         port->need_reconfig = 1;
598         port->need_reconfig_queues = 1;
599         port->socket_id = socket_id;
600
601         init_port_config();
602 }
603
604
605 int
606 init_fwd_streams(void)
607 {
608         portid_t pid;
609         struct rte_port *port;
610         streamid_t sm_id, nb_fwd_streams_new;
611
612         /* set the socket id according to the NUMA configuration */
613         FOREACH_PORT(pid, ports) {
614                 port = &ports[pid];
615                 if (nb_rxq > port->dev_info.max_rx_queues) {
616                         printf("Fail: nb_rxq(%d) is greater than "
617                                 "max_rx_queues(%d)\n", nb_rxq,
618                                 port->dev_info.max_rx_queues);
619                         return -1;
620                 }
621                 if (nb_txq > port->dev_info.max_tx_queues) {
622                         printf("Fail: nb_txq(%d) is greater than "
623                                 "max_tx_queues(%d)\n", nb_txq,
624                                 port->dev_info.max_tx_queues);
625                         return -1;
626                 }
627                 if (numa_support) {
628                         if (port_numa[pid] != NUMA_NO_CONFIG)
629                                 port->socket_id = port_numa[pid];
630                         else {
631                                 port->socket_id = rte_eth_dev_socket_id(pid);
632
633                                 /* if socket_id is invalid, set to 0 */
634                                 if (check_socket_id(port->socket_id) < 0)
635                                         port->socket_id = 0;
636                         }
637                 }
638                 else {
639                         if (socket_num == UMA_NO_CONFIG)
640                                 port->socket_id = 0;
641                         else
642                                 port->socket_id = socket_num;
643                 }
644         }
645
646         nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
647         if (nb_fwd_streams_new == nb_fwd_streams)
648                 return 0;
649         /* clear the old */
650         if (fwd_streams != NULL) {
651                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
652                         if (fwd_streams[sm_id] == NULL)
653                                 continue;
654                         rte_free(fwd_streams[sm_id]);
655                         fwd_streams[sm_id] = NULL;
656                 }
657                 rte_free(fwd_streams);
658                 fwd_streams = NULL;
659         }
660
661         /* init new */
662         nb_fwd_streams = nb_fwd_streams_new;
663         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
664                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
665         if (fwd_streams == NULL)
666                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
667                                                 "failed\n", nb_fwd_streams);
668
669         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
670                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
671                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
672                 if (fwd_streams[sm_id] == NULL)
673                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
674                                                                 " failed\n");
675         }
676
677         return 0;
678 }
679
680 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
681 static void
682 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
683 {
684         unsigned int total_burst;
685         unsigned int nb_burst;
686         unsigned int burst_stats[3];
687         uint16_t pktnb_stats[3];
688         uint16_t nb_pkt;
689         int burst_percent[3];
690
691         /*
692          * First compute the total number of packet bursts and the
693          * two highest numbers of bursts of the same number of packets.
694          */
695         total_burst = 0;
696         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
697         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
698         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
699                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
700                 if (nb_burst == 0)
701                         continue;
702                 total_burst += nb_burst;
703                 if (nb_burst > burst_stats[0]) {
704                         burst_stats[1] = burst_stats[0];
705                         pktnb_stats[1] = pktnb_stats[0];
706                         burst_stats[0] = nb_burst;
707                         pktnb_stats[0] = nb_pkt;
708                 }
709         }
710         if (total_burst == 0)
711                 return;
712         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
713         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
714                burst_percent[0], (int) pktnb_stats[0]);
715         if (burst_stats[0] == total_burst) {
716                 printf("]\n");
717                 return;
718         }
719         if (burst_stats[0] + burst_stats[1] == total_burst) {
720                 printf(" + %d%% of %d pkts]\n",
721                        100 - burst_percent[0], pktnb_stats[1]);
722                 return;
723         }
724         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
725         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
726         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
727                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
728                 return;
729         }
730         printf(" + %d%% of %d pkts + %d%% of others]\n",
731                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
732 }
733 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
734
735 static void
736 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
737 {
738         struct rte_port *port;
739         uint8_t i;
740
741         static const char *fwd_stats_border = "----------------------";
742
743         port = &ports[port_id];
744         printf("\n  %s Forward statistics for port %-2d %s\n",
745                fwd_stats_border, port_id, fwd_stats_border);
746
747         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
748                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
749                        "%-"PRIu64"\n",
750                        stats->ipackets, stats->imissed,
751                        (uint64_t) (stats->ipackets + stats->imissed));
752
753                 if (cur_fwd_eng == &csum_fwd_engine)
754                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
755                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
756                 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
757                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
758                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
759                 }
760
761                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
762                        "%-"PRIu64"\n",
763                        stats->opackets, port->tx_dropped,
764                        (uint64_t) (stats->opackets + port->tx_dropped));
765         }
766         else {
767                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
768                        "%14"PRIu64"\n",
769                        stats->ipackets, stats->imissed,
770                        (uint64_t) (stats->ipackets + stats->imissed));
771
772                 if (cur_fwd_eng == &csum_fwd_engine)
773                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
774                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
775                 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
776                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
777                         printf("  RX-nombufs:             %14"PRIu64"\n",
778                                stats->rx_nombuf);
779                 }
780
781                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
782                        "%14"PRIu64"\n",
783                        stats->opackets, port->tx_dropped,
784                        (uint64_t) (stats->opackets + port->tx_dropped));
785         }
786
787 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
788         if (port->rx_stream)
789                 pkt_burst_stats_display("RX",
790                         &port->rx_stream->rx_burst_stats);
791         if (port->tx_stream)
792                 pkt_burst_stats_display("TX",
793                         &port->tx_stream->tx_burst_stats);
794 #endif
795
796         if (port->rx_queue_stats_mapping_enabled) {
797                 printf("\n");
798                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
799                         printf("  Stats reg %2d RX-packets:%14"PRIu64
800                                "     RX-errors:%14"PRIu64
801                                "    RX-bytes:%14"PRIu64"\n",
802                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
803                 }
804                 printf("\n");
805         }
806         if (port->tx_queue_stats_mapping_enabled) {
807                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
808                         printf("  Stats reg %2d TX-packets:%14"PRIu64
809                                "                                 TX-bytes:%14"PRIu64"\n",
810                                i, stats->q_opackets[i], stats->q_obytes[i]);
811                 }
812         }
813
814         printf("  %s--------------------------------%s\n",
815                fwd_stats_border, fwd_stats_border);
816 }
817
818 static void
819 fwd_stream_stats_display(streamid_t stream_id)
820 {
821         struct fwd_stream *fs;
822         static const char *fwd_top_stats_border = "-------";
823
824         fs = fwd_streams[stream_id];
825         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
826             (fs->fwd_dropped == 0))
827                 return;
828         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
829                "TX Port=%2d/Queue=%2d %s\n",
830                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
831                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
832         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
833                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
834
835         /* if checksum mode */
836         if (cur_fwd_eng == &csum_fwd_engine) {
837                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
838                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
839         }
840
841 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
842         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
843         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
844 #endif
845 }
846
847 static void
848 flush_fwd_rx_queues(void)
849 {
850         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
851         portid_t  rxp;
852         portid_t port_id;
853         queueid_t rxq;
854         uint16_t  nb_rx;
855         uint16_t  i;
856         uint8_t   j;
857
858         for (j = 0; j < 2; j++) {
859                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
860                         for (rxq = 0; rxq < nb_rxq; rxq++) {
861                                 port_id = fwd_ports_ids[rxp];
862                                 do {
863                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
864                                                 pkts_burst, MAX_PKT_BURST);
865                                         for (i = 0; i < nb_rx; i++)
866                                                 rte_pktmbuf_free(pkts_burst[i]);
867                                 } while (nb_rx > 0);
868                         }
869                 }
870                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
871         }
872 }
873
874 static void
875 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
876 {
877         struct fwd_stream **fsm;
878         streamid_t nb_fs;
879         streamid_t sm_id;
880
881         fsm = &fwd_streams[fc->stream_idx];
882         nb_fs = fc->stream_nb;
883         do {
884                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
885                         (*pkt_fwd)(fsm[sm_id]);
886         } while (! fc->stopped);
887 }
888
889 static int
890 start_pkt_forward_on_core(void *fwd_arg)
891 {
892         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
893                              cur_fwd_config.fwd_eng->packet_fwd);
894         return 0;
895 }
896
897 /*
898  * Run the TXONLY packet forwarding engine to send a single burst of packets.
899  * Used to start communication flows in network loopback test configurations.
900  */
901 static int
902 run_one_txonly_burst_on_core(void *fwd_arg)
903 {
904         struct fwd_lcore *fwd_lc;
905         struct fwd_lcore tmp_lcore;
906
907         fwd_lc = (struct fwd_lcore *) fwd_arg;
908         tmp_lcore = *fwd_lc;
909         tmp_lcore.stopped = 1;
910         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
911         return 0;
912 }
913
914 /*
915  * Launch packet forwarding:
916  *     - Setup per-port forwarding context.
917  *     - launch logical cores with their forwarding configuration.
918  */
919 static void
920 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
921 {
922         port_fwd_begin_t port_fwd_begin;
923         unsigned int i;
924         unsigned int lc_id;
925         int diag;
926
927         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
928         if (port_fwd_begin != NULL) {
929                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
930                         (*port_fwd_begin)(fwd_ports_ids[i]);
931         }
932         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
933                 lc_id = fwd_lcores_cpuids[i];
934                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
935                         fwd_lcores[i]->stopped = 0;
936                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
937                                                      fwd_lcores[i], lc_id);
938                         if (diag != 0)
939                                 printf("launch lcore %u failed - diag=%d\n",
940                                        lc_id, diag);
941                 }
942         }
943 }
944
945 /*
946  * Launch packet forwarding configuration.
947  */
948 void
949 start_packet_forwarding(int with_tx_first)
950 {
951         port_fwd_begin_t port_fwd_begin;
952         port_fwd_end_t  port_fwd_end;
953         struct rte_port *port;
954         unsigned int i;
955         portid_t   pt_id;
956         streamid_t sm_id;
957
958         if (all_ports_started() == 0) {
959                 printf("Not all ports were started\n");
960                 return;
961         }
962         if (test_done == 0) {
963                 printf("Packet forwarding already started\n");
964                 return;
965         }
966         if (dcb_test) {
967                 for (i = 0; i < nb_fwd_ports; i++) {
968                         pt_id = fwd_ports_ids[i];
969                         port = &ports[pt_id];
970                         if (!port->dcb_flag) {
971                                 printf("In DCB mode, all forwarding ports must "
972                                        "be configured in this mode.\n");
973                                 return;
974                         }
975                 }
976                 if (nb_fwd_lcores == 1) {
977                         printf("In DCB mode, the number of forwarding cores "
978                                "should be larger than 1.\n");
979                         return;
980                 }
981         }
982         test_done = 0;
983
984         if (!no_flush_rx)
985                 flush_fwd_rx_queues();
986
987         fwd_config_setup();
988         rxtx_config_display();
989
990         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
991                 pt_id = fwd_ports_ids[i];
992                 port = &ports[pt_id];
993                 rte_eth_stats_get(pt_id, &port->stats);
994                 port->tx_dropped = 0;
995
996                 map_port_queue_stats_mapping_registers(pt_id, port);
997         }
998         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
999                 fwd_streams[sm_id]->rx_packets = 0;
1000                 fwd_streams[sm_id]->tx_packets = 0;
1001                 fwd_streams[sm_id]->fwd_dropped = 0;
1002                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1003                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1004
1005 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1006                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1007                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1008                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1009                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1010 #endif
1011 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1012                 fwd_streams[sm_id]->core_cycles = 0;
1013 #endif
1014         }
1015         if (with_tx_first) {
1016                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1017                 if (port_fwd_begin != NULL) {
1018                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1019                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1020                 }
1021                 launch_packet_forwarding(run_one_txonly_burst_on_core);
1022                 rte_eal_mp_wait_lcore();
1023                 port_fwd_end = tx_only_engine.port_fwd_end;
1024                 if (port_fwd_end != NULL) {
1025                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1026                                 (*port_fwd_end)(fwd_ports_ids[i]);
1027                 }
1028         }
1029         launch_packet_forwarding(start_pkt_forward_on_core);
1030 }
1031
1032 void
1033 stop_packet_forwarding(void)
1034 {
1035         struct rte_eth_stats stats;
1036         struct rte_port *port;
1037         port_fwd_end_t  port_fwd_end;
1038         int i;
1039         portid_t   pt_id;
1040         streamid_t sm_id;
1041         lcoreid_t  lc_id;
1042         uint64_t total_recv;
1043         uint64_t total_xmit;
1044         uint64_t total_rx_dropped;
1045         uint64_t total_tx_dropped;
1046         uint64_t total_rx_nombuf;
1047         uint64_t tx_dropped;
1048         uint64_t rx_bad_ip_csum;
1049         uint64_t rx_bad_l4_csum;
1050 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1051         uint64_t fwd_cycles;
1052 #endif
1053         static const char *acc_stats_border = "+++++++++++++++";
1054
1055         if (all_ports_started() == 0) {
1056                 printf("Not all ports were started\n");
1057                 return;
1058         }
1059         if (test_done) {
1060                 printf("Packet forwarding not started\n");
1061                 return;
1062         }
1063         printf("Telling cores to stop...");
1064         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1065                 fwd_lcores[lc_id]->stopped = 1;
1066         printf("\nWaiting for lcores to finish...\n");
1067         rte_eal_mp_wait_lcore();
1068         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1069         if (port_fwd_end != NULL) {
1070                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1071                         pt_id = fwd_ports_ids[i];
1072                         (*port_fwd_end)(pt_id);
1073                 }
1074         }
1075 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1076         fwd_cycles = 0;
1077 #endif
1078         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1079                 if (cur_fwd_config.nb_fwd_streams >
1080                     cur_fwd_config.nb_fwd_ports) {
1081                         fwd_stream_stats_display(sm_id);
1082                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1083                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1084                 } else {
1085                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1086                                 fwd_streams[sm_id];
1087                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1088                                 fwd_streams[sm_id];
1089                 }
1090                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1091                 tx_dropped = (uint64_t) (tx_dropped +
1092                                          fwd_streams[sm_id]->fwd_dropped);
1093                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1094
1095                 rx_bad_ip_csum =
1096                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1097                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1098                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1099                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1100                                                         rx_bad_ip_csum;
1101
1102                 rx_bad_l4_csum =
1103                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1104                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1105                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1106                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1107                                                         rx_bad_l4_csum;
1108
1109 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1110                 fwd_cycles = (uint64_t) (fwd_cycles +
1111                                          fwd_streams[sm_id]->core_cycles);
1112 #endif
1113         }
1114         total_recv = 0;
1115         total_xmit = 0;
1116         total_rx_dropped = 0;
1117         total_tx_dropped = 0;
1118         total_rx_nombuf  = 0;
1119         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1120                 pt_id = fwd_ports_ids[i];
1121
1122                 port = &ports[pt_id];
1123                 rte_eth_stats_get(pt_id, &stats);
1124                 stats.ipackets -= port->stats.ipackets;
1125                 port->stats.ipackets = 0;
1126                 stats.opackets -= port->stats.opackets;
1127                 port->stats.opackets = 0;
1128                 stats.ibytes   -= port->stats.ibytes;
1129                 port->stats.ibytes = 0;
1130                 stats.obytes   -= port->stats.obytes;
1131                 port->stats.obytes = 0;
1132                 stats.imissed  -= port->stats.imissed;
1133                 port->stats.imissed = 0;
1134                 stats.oerrors  -= port->stats.oerrors;
1135                 port->stats.oerrors = 0;
1136                 stats.rx_nombuf -= port->stats.rx_nombuf;
1137                 port->stats.rx_nombuf = 0;
1138
1139                 total_recv += stats.ipackets;
1140                 total_xmit += stats.opackets;
1141                 total_rx_dropped += stats.imissed;
1142                 total_tx_dropped += port->tx_dropped;
1143                 total_rx_nombuf  += stats.rx_nombuf;
1144
1145                 fwd_port_stats_display(pt_id, &stats);
1146         }
1147         printf("\n  %s Accumulated forward statistics for all ports"
1148                "%s\n",
1149                acc_stats_border, acc_stats_border);
1150         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1151                "%-"PRIu64"\n"
1152                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1153                "%-"PRIu64"\n",
1154                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1155                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1156         if (total_rx_nombuf > 0)
1157                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1158         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1159                "%s\n",
1160                acc_stats_border, acc_stats_border);
1161 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1162         if (total_recv > 0)
1163                 printf("\n  CPU cycles/packet=%u (total cycles="
1164                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1165                        (unsigned int)(fwd_cycles / total_recv),
1166                        fwd_cycles, total_recv);
1167 #endif
1168         printf("\nDone.\n");
1169         test_done = 1;
1170 }
1171
1172 void
1173 dev_set_link_up(portid_t pid)
1174 {
1175         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1176                 printf("\nSet link up failed.\n");
1177 }
1178
1179 void
1180 dev_set_link_down(portid_t pid)
1181 {
1182         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1183                 printf("\nSet link down failed.\n");
1184 }
1185
1186 static int
1187 all_ports_started(void)
1188 {
1189         portid_t pi;
1190         struct rte_port *port;
1191
1192         FOREACH_PORT(pi, ports) {
1193                 port = &ports[pi];
1194                 /* Check if there is a port which is not started */
1195                 if ((port->port_status != RTE_PORT_STARTED) &&
1196                         (port->slave_flag == 0))
1197                         return 0;
1198         }
1199
1200         /* All ports are started */
1201         return 1;
1202 }
1203
1204 int
1205 all_ports_stopped(void)
1206 {
1207         portid_t pi;
1208         struct rte_port *port;
1209
1210         FOREACH_PORT(pi, ports) {
1211                 port = &ports[pi];
1212                 if ((port->port_status != RTE_PORT_STOPPED) &&
1213                         (port->slave_flag == 0))
1214                         return 0;
1215         }
1216
1217         return 1;
1218 }
1219
1220 int
1221 port_is_started(portid_t port_id)
1222 {
1223         if (port_id_is_invalid(port_id, ENABLED_WARN))
1224                 return 0;
1225
1226         if (ports[port_id].port_status != RTE_PORT_STARTED)
1227                 return 0;
1228
1229         return 1;
1230 }
1231
1232 static int
1233 port_is_closed(portid_t port_id)
1234 {
1235         if (port_id_is_invalid(port_id, ENABLED_WARN))
1236                 return 0;
1237
1238         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1239                 return 0;
1240
1241         return 1;
1242 }
1243
1244 int
1245 start_port(portid_t pid)
1246 {
1247         int diag, need_check_link_status = -1;
1248         portid_t pi;
1249         queueid_t qi;
1250         struct rte_port *port;
1251         struct ether_addr mac_addr;
1252
1253         if (test_done == 0) {
1254                 printf("Please stop forwarding first\n");
1255                 return -1;
1256         }
1257
1258         if (port_id_is_invalid(pid, ENABLED_WARN))
1259                 return 0;
1260
1261         if (init_fwd_streams() < 0) {
1262                 printf("Fail from init_fwd_streams()\n");
1263                 return -1;
1264         }
1265
1266         if (dcb_config)
1267                 dcb_test = 1;
1268         FOREACH_PORT(pi, ports) {
1269                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1270                         continue;
1271
1272                 need_check_link_status = 0;
1273                 port = &ports[pi];
1274                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1275                                                  RTE_PORT_HANDLING) == 0) {
1276                         printf("Port %d is not stopped\n", pi);
1277                         continue;
1278                 }
1279
1280                 if (port->need_reconfig > 0) {
1281                         port->need_reconfig = 0;
1282
1283                         printf("Configuring Port %d (socket %u)\n", pi,
1284                                         port->socket_id);
1285                         /* configure port */
1286                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1287                                                 &(port->dev_conf));
1288                         if (diag != 0) {
1289                                 if (rte_atomic16_cmpset(&(port->port_status),
1290                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1291                                         printf("Port %d can not be set back "
1292                                                         "to stopped\n", pi);
1293                                 printf("Fail to configure port %d\n", pi);
1294                                 /* try to reconfigure port next time */
1295                                 port->need_reconfig = 1;
1296                                 return -1;
1297                         }
1298                 }
1299                 if (port->need_reconfig_queues > 0) {
1300                         port->need_reconfig_queues = 0;
1301                         /* setup tx queues */
1302                         for (qi = 0; qi < nb_txq; qi++) {
1303                                 if ((numa_support) &&
1304                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1305                                         diag = rte_eth_tx_queue_setup(pi, qi,
1306                                                 nb_txd, txring_numa[pi],
1307                                                 &(port->tx_conf));
1308                                 else
1309                                         diag = rte_eth_tx_queue_setup(pi, qi,
1310                                                 nb_txd, port->socket_id,
1311                                                 &(port->tx_conf));
1312
1313                                 if (diag == 0)
1314                                         continue;
1315
1316                                 /* Fail to setup tx queue, return */
1317                                 if (rte_atomic16_cmpset(&(port->port_status),
1318                                                         RTE_PORT_HANDLING,
1319                                                         RTE_PORT_STOPPED) == 0)
1320                                         printf("Port %d can not be set back "
1321                                                         "to stopped\n", pi);
1322                                 printf("Fail to configure port %d tx queues\n", pi);
1323                                 /* try to reconfigure queues next time */
1324                                 port->need_reconfig_queues = 1;
1325                                 return -1;
1326                         }
1327                         /* setup rx queues */
1328                         for (qi = 0; qi < nb_rxq; qi++) {
1329                                 if ((numa_support) &&
1330                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
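                                            /* the RX ring was pinned to this socket via --ring-numa-config, so an mbuf pool must already exist there */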
1331                                         struct rte_mempool * mp =
1332                                                 mbuf_pool_find(rxring_numa[pi]);
1333                                         if (mp == NULL) {
1334                                                 printf("Failed to setup RX queue: "
1335                                                         "No mempool allocation "
1336                                                         "on the socket %d\n",
1337                                                         rxring_numa[pi]);
1338                                                 return -1;
1339                                         }
1340
1341                                         diag = rte_eth_rx_queue_setup(pi, qi,
1342                                              nb_rxd, rxring_numa[pi],
1343                                              &(port->rx_conf), mp);
1344                                 }
1345                                 else
1346                                         diag = rte_eth_rx_queue_setup(pi, qi,
1347                                              nb_rxd, port->socket_id,
1348                                              &(port->rx_conf),
1349                                              mbuf_pool_find(port->socket_id));
1350
1351                                 if (diag == 0)
1352                                         continue;
1353
1354
1355                                 /* Fail to setup rx queue, return */
1356                                 if (rte_atomic16_cmpset(&(port->port_status),
1357                                                         RTE_PORT_HANDLING,
1358                                                         RTE_PORT_STOPPED) == 0)
1359                                         printf("Port %d can not be set back "
1360                                                         "to stopped\n", pi);
1361                                 printf("Fail to configure port %d rx queues\n", pi);
1362                                 /* try to reconfigure queues next time */
1363                                 port->need_reconfig_queues = 1;
1364                                 return -1;
1365                         }
1366                 }
1367                 /* start port */
1368                 if (rte_eth_dev_start(pi) < 0) {
1369                         printf("Fail to start port %d\n", pi);
1370
1371                         /* Failed to start the port; set it back to stopped */
1372                         if (rte_atomic16_cmpset(&(port->port_status),
1373                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1374                                 printf("Port %d can not be set back to "
1375                                                         "stopped\n", pi);
1376                         continue;
1377                 }
1378
1379                 if (rte_atomic16_cmpset(&(port->port_status),
1380                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1381                         printf("Port %d can not be set into started\n", pi);
1382
1383                 rte_eth_macaddr_get(pi, &mac_addr);
1384                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1385                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1386                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1387                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1388
1389                 /* at least one port started, need checking link status */
1390                 need_check_link_status = 1;
1391         }
1392
1393         if (need_check_link_status == 1 && !no_link_check)
1394                 check_all_ports_link_status(RTE_PORT_ALL);
1395         else if (need_check_link_status == 0)
1396                 printf("Please stop the ports first\n");
1397
1398         printf("Done\n");
1399         return 0;
1400 }
1401
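/*
 * Stop the port given by "pid", or every enabled port when pid is
 * RTE_PORT_ALL. Forwarding must already be stopped; ports that are not
 * in the started state are skipped.
 */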
1402 void
1403 stop_port(portid_t pid)
1404 {
1405         portid_t pi;
1406         struct rte_port *port;
1407         int need_check_link_status = 0;
1408
1409         if (test_done == 0) {
1410                 printf("Please stop forwarding first\n");
1411                 return;
1412         }
1413         if (dcb_test) {
1414                 dcb_test = 0;
1415                 dcb_config = 0;
1416         }
1417
1418         if (port_id_is_invalid(pid, ENABLED_WARN))
1419                 return;
1420
1421         printf("Stopping ports...\n");
1422
1423         FOREACH_PORT(pi, ports) {
1424                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1425                         continue;
1426
1427                 port = &ports[pi];
1428                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1429                                                 RTE_PORT_HANDLING) == 0)
1430                         continue;
1431
1432                 rte_eth_dev_stop(pi);
1433
1434                 if (rte_atomic16_cmpset(&(port->port_status),
1435                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1436                         printf("Port %d cannot be set into stopped\n", pi);
1437                 need_check_link_status = 1;
1438         }
1439         if (need_check_link_status && !no_link_check)
1440                 check_all_ports_link_status(RTE_PORT_ALL);
1441
1442         printf("Done\n");
1443 }
1444
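/*
 * Close the port given by "pid", or every enabled port when pid is
 * RTE_PORT_ALL. A port must be stopped before it can be closed;
 * ports that are already closed are skipped.
 */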
1445 void
1446 close_port(portid_t pid)
1447 {
1448         portid_t pi;
1449         struct rte_port *port;
1450
1451         if (test_done == 0) {
1452                 printf("Please stop forwarding first\n");
1453                 return;
1454         }
1455
1456         if (port_id_is_invalid(pid, ENABLED_WARN))
1457                 return;
1458
1459         printf("Closing ports...\n");
1460
1461         FOREACH_PORT(pi, ports) {
1462                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1463                         continue;
1464
1465                 port = &ports[pi];
1466                 if (rte_atomic16_cmpset(&(port->port_status),
1467                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1468                         printf("Port %d is already closed\n", pi);
1469                         continue;
1470                 }
1471
1472                 if (rte_atomic16_cmpset(&(port->port_status),
1473                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1474                         printf("Port %d is now not stopped\n", pi);
1475                         continue;
1476                 }
1477
1478                 rte_eth_dev_close(pi);
1479
1480                 if (rte_atomic16_cmpset(&(port->port_status),
1481                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1482                         printf("Port %d cannot be set into closed\n", pi);
1483         }
1484
1485         printf("Done\n");
1486 }
1487
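/*
 * Hot-plug a new port described by "identifier" (e.g. a PCI address or a
 * virtual device name), configure it on its own NUMA socket, enable
 * promiscuous mode and add it to the forwarding port list.
 */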
1488 void
1489 attach_port(char *identifier)
1490 {
1491         portid_t i, j, pi = 0;
1492
1493         printf("Attaching a new port...\n");
1494
1495         if (identifier == NULL) {
1496                 printf("Invalid parameters are specified\n");
1497                 return;
1498         }
1499
1500         if (test_done == 0) {
1501                 printf("Please stop forwarding first\n");
1502                 return;
1503         }
1504
1505         if (rte_eth_dev_attach(identifier, &pi))
1506                 return;
1507
1508         ports[pi].enabled = 1;
1509         reconfig(pi, rte_eth_dev_socket_id(pi));
1510         rte_eth_promiscuous_enable(pi);
1511
1512         nb_ports = rte_eth_dev_count();
1513
1514         /* set_default_fwd_ports_config(); */
1515         memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids));
1516         i = 0;
1517         FOREACH_PORT(j, ports) {
1518                 fwd_ports_ids[i] = j;
1519                 i++;
1520         }
1521         nb_cfg_ports = nb_ports;
1522         nb_fwd_ports++;
1523
1524         ports[pi].port_status = RTE_PORT_STOPPED;
1525
1526         printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1527         printf("Done\n");
1528 }
1529
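/*
 * Hot-unplug a port that has previously been closed and remove it from
 * the forwarding port list.
 */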
1530 void
1531 detach_port(uint8_t port_id)
1532 {
1533         portid_t i, pi = 0;
1534         char name[RTE_ETH_NAME_MAX_LEN];
1535
1536         printf("Detaching a port...\n");
1537
1538         if (!port_is_closed(port_id)) {
1539                 printf("Please close port first\n");
1540                 return;
1541         }
1542
1543         if (rte_eth_dev_detach(port_id, name))
1544                 return;
1545
1546         ports[port_id].enabled = 0;
1547         nb_ports = rte_eth_dev_count();
1548
1549         /* set_default_fwd_ports_config(); */
1550         memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids));
1551         i = 0;
1552         FOREACH_PORT(pi, ports) {
1553                 fwd_ports_ids[i] = pi;
1554                 i++;
1555         }
1556         nb_cfg_ports = nb_ports;
1557         nb_fwd_ports--;
1558
1559         printf("Port '%s' is detached. Now total ports is %d\n",
1560                         name, nb_ports);
1561         printf("Done\n");
1562         return;
1563 }
1564
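/*
 * Clean up on exit: stop packet forwarding if it is still running, then
 * stop and close every enabled port (link status checks are skipped).
 */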
1565 void
1566 pmd_test_exit(void)
1567 {
1568         portid_t pt_id;
1569
1570         if (test_done == 0)
1571                 stop_packet_forwarding();
1572
1573         if (ports != NULL) {
1574                 no_link_check = 1;
1575                 FOREACH_PORT(pt_id, ports) {
1576                         printf("\nShutting down port %d...\n", pt_id);
1577                         fflush(stdout);
1578                         stop_port(pt_id);
1579                         close_port(pt_id);
1580                 }
1581         }
1582         printf("\nBye...\n");
1583 }
1584
1585 typedef void (*cmd_func_t)(void);
1586 struct pmd_test_command {
1587         const char *cmd_name;
1588         cmd_func_t cmd_func;
1589 };
1590
1591 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1592
1593 /* Poll the link status of all ports for up to 9 seconds and print the final status */
1594 static void
1595 check_all_ports_link_status(uint32_t port_mask)
1596 {
1597 #define CHECK_INTERVAL 100 /* 100ms */
1598 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1599         uint8_t portid, count, all_ports_up, print_flag = 0;
1600         struct rte_eth_link link;
1601
1602         printf("Checking link statuses...\n");
1603         fflush(stdout);
1604         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1605                 all_ports_up = 1;
1606                 FOREACH_PORT(portid, ports) {
1607                         if ((port_mask & (1 << portid)) == 0)
1608                                 continue;
1609                         memset(&link, 0, sizeof(link));
1610                         rte_eth_link_get_nowait(portid, &link);
1611                         /* print link status if flag set */
1612                         if (print_flag == 1) {
1613                                 if (link.link_status)
1614                                         printf("Port %d Link Up - speed %u "
1615                                                 "Mbps - %s\n", (uint8_t)portid,
1616                                                 (unsigned)link.link_speed,
1617                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1618                                         ("full-duplex") : ("half-duplex"));
1619                                 else
1620                                         printf("Port %d Link Down\n",
1621                                                 (uint8_t)portid);
1622                                 continue;
1623                         }
1624                         /* clear all_ports_up flag if any link down */
1625                         if (link.link_status == 0) {
1626                                 all_ports_up = 0;
1627                                 break;
1628                         }
1629                 }
1630                 /* after finally printing all link status, get out */
1631                 if (print_flag == 1)
1632                         break;
1633
1634                 if (all_ports_up == 0) {
1635                         fflush(stdout);
1636                         rte_delay_ms(CHECK_INTERVAL);
1637                 }
1638
1639                 /* set the print_flag if all ports up or timeout */
1640                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1641                         print_flag = 1;
1642                 }
1643         }
1644 }
1645
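/*
 * Program the user supplied TX queue to statistics counter mappings of
 * "port_id" and flag the port when at least one mapping has been applied.
 */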
1646 static int
1647 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1648 {
1649         uint16_t i;
1650         int diag;
1651         uint8_t mapping_found = 0;
1652
1653         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1654                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1655                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1656                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1657                                         tx_queue_stats_mappings[i].queue_id,
1658                                         tx_queue_stats_mappings[i].stats_counter_id);
1659                         if (diag != 0)
1660                                 return diag;
1661                         mapping_found = 1;
1662                 }
1663         }
1664         if (mapping_found)
1665                 port->tx_queue_stats_mapping_enabled = 1;
1666         return 0;
1667 }
1668
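/*
 * Program the user supplied RX queue to statistics counter mappings of
 * "port_id" and flag the port when at least one mapping has been applied.
 */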
1669 static int
1670 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1671 {
1672         uint16_t i;
1673         int diag;
1674         uint8_t mapping_found = 0;
1675
1676         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1677                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1678                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1679                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1680                                         rx_queue_stats_mappings[i].queue_id,
1681                                         rx_queue_stats_mappings[i].stats_counter_id);
1682                         if (diag != 0)
1683                                 return diag;
1684                         mapping_found = 1;
1685                 }
1686         }
1687         if (mapping_found)
1688                 port->rx_queue_stats_mapping_enabled = 1;
1689         return 0;
1690 }
1691
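/*
 * Program both the TX and the RX queue statistics mappings of a port.
 * -ENOTSUP only disables the feature for that port; any other error
 * is treated as fatal.
 */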
1692 static void
1693 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1694 {
1695         int diag = 0;
1696
1697         diag = set_tx_queue_stats_mapping_registers(pi, port);
1698         if (diag != 0) {
1699                 if (diag == -ENOTSUP) {
1700                         port->tx_queue_stats_mapping_enabled = 0;
1701                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1702                 }
1703                 else
1704                         rte_exit(EXIT_FAILURE,
1705                                         "set_tx_queue_stats_mapping_registers "
1706                                         "failed for port id=%d diag=%d\n",
1707                                         pi, diag);
1708         }
1709
1710         diag = set_rx_queue_stats_mapping_registers(pi, port);
1711         if (diag != 0) {
1712                 if (diag == -ENOTSUP) {
1713                         port->rx_queue_stats_mapping_enabled = 0;
1714                         printf("RX queue stats mapping not supported port id=%d\n", pi);
1715                 }
1716                 else
1717                         rte_exit(EXIT_FAILURE,
1718                                         "set_rx_queue_stats_mapping_registers "
1719                                         "failed for port id=%d diag=%d\n",
1720                                         pi, diag);
1721         }
1722 }
1723
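/*
 * Start from the PMD default RX/TX queue configuration of the port and
 * override only the parameters that were explicitly given on the
 * command line.
 */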
1724 static void
1725 rxtx_port_config(struct rte_port *port)
1726 {
1727         port->rx_conf = port->dev_info.default_rxconf;
1728         port->tx_conf = port->dev_info.default_txconf;
1729
1730         /* Check if any RX/TX parameters have been passed */
1731         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1732                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1733
1734         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1735                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1736
1737         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1738                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1739
1740         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1741                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1742
1743         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1744                 port->rx_conf.rx_drop_en = rx_drop_en;
1745
1746         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1747                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1748
1749         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1750                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1751
1752         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1753                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1754
1755         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1756                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1757
1758         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1759                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1760
1761         if (txq_flags != RTE_PMD_PARAM_UNSET)
1762                 port->tx_conf.txq_flags = txq_flags;
1763 }
1764
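/*
 * Apply the default device configuration (RX mode, flow director, RSS
 * and multi-queue mode) to every enabled port before it is started.
 */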
1765 void
1766 init_port_config(void)
1767 {
1768         portid_t pid;
1769         struct rte_port *port;
1770
1771         FOREACH_PORT(pid, ports) {
1772                 port = &ports[pid];
1773                 port->dev_conf.rxmode = rx_mode;
1774                 port->dev_conf.fdir_conf = fdir_conf;
1775                 if (nb_rxq > 1) {
1776                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1777                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1778                 } else {
1779                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1780                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1781                 }
1782
1783                 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1784                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1785                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1786                         else
1787                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1788                 }
1789
1790                 if (port->dev_info.max_vfs != 0) {
1791                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1792                                 port->dev_conf.rxmode.mq_mode =
1793                                         ETH_MQ_RX_VMDQ_RSS;
1794                         else
1795                                 port->dev_conf.rxmode.mq_mode =
1796                                         ETH_MQ_RX_NONE;
1797
1798                         port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1799                 }
1800
1801                 rxtx_port_config(port);
1802
1803                 rte_eth_macaddr_get(pid, &port->eth_addr);
1804
1805                 map_port_queue_stats_mapping_registers(pid, port);
1806 #ifdef RTE_NIC_BYPASS
1807                 rte_eth_dev_bypass_init(pid);
1808 #endif
1809         }
1810 }
1811
1812 void set_port_slave_flag(portid_t slave_pid)
1813 {
1814         struct rte_port *port;
1815
1816         port = &ports[slave_pid];
1817         port->slave_flag = 1;
1818 }
1819
1820 void clear_port_slave_flag(portid_t slave_pid)
1821 {
1822         struct rte_port *port;
1823
1824         port = &ports[slave_pid];
1825         port->slave_flag = 0;
1826 }
1827
1828 const uint16_t vlan_tags[] = {
1829                 0,  1,  2,  3,  4,  5,  6,  7,
1830                 8,  9, 10, 11,  12, 13, 14, 15,
1831                 16, 17, 18, 19, 20, 21, 22, 23,
1832                 24, 25, 26, 27, 28, 29, 30, 31
1833 };
1834
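/*
 * Fill "eth_conf" for DCB operation: VMDq+DCB when dcb_mode is
 * DCB_VT_ENABLED, plain DCB (combined with RSS) otherwise, optionally
 * with priority flow control enabled.
 */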
1835 static int
1836 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1837                  enum dcb_mode_enable dcb_mode,
1838                  enum rte_eth_nb_tcs num_tcs,
1839                  uint8_t pfc_en)
1840 {
1841         uint8_t i;
1842
1843         /*
1844          * Build up the correct configuration for DCB+VT based on the VLAN tags
1845          * array given above and on the number of traffic classes available for use.
1846          */
1847         if (dcb_mode == DCB_VT_ENABLED) {
1848                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1849                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
1850                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1851                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1852
1853                 /* VMDQ+DCB RX and TX configurations */
1854                 vmdq_rx_conf->enable_default_pool = 0;
1855                 vmdq_rx_conf->default_pool = 0;
1856                 vmdq_rx_conf->nb_queue_pools =
1857                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1858                 vmdq_tx_conf->nb_queue_pools =
1859                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1860
1861                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1862                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1863                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1864                         vmdq_rx_conf->pool_map[i].pools =
1865                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
1866                 }
1867                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1868                         vmdq_rx_conf->dcb_tc[i] = i;
1869                         vmdq_tx_conf->dcb_tc[i] = i;
1870                 }
1871
1872                 /* set DCB mode of RX and TX of multiple queues */
1873                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1874                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1875         } else {
1876                 struct rte_eth_dcb_rx_conf *rx_conf =
1877                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
1878                 struct rte_eth_dcb_tx_conf *tx_conf =
1879                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
1880
1881                 rx_conf->nb_tcs = num_tcs;
1882                 tx_conf->nb_tcs = num_tcs;
1883
1884                 for (i = 0; i < num_tcs; i++) {
1885                         rx_conf->dcb_tc[i] = i;
1886                         tx_conf->dcb_tc[i] = i;
1887                 }
1888                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1889                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1890                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1891         }
1892
1893         if (pfc_en)
1894                 eth_conf->dcb_capability_en =
1895                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1896         else
1897                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1898
1899         return 0;
1900 }
1901
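/*
 * Switch a port to DCB mode: derive the number of RX/TX queues from the
 * device capabilities, build the DCB configuration and enable VLAN
 * filtering for the test VLAN tags.
 */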
1902 int
1903 init_port_dcb_config(portid_t pid,
1904                      enum dcb_mode_enable dcb_mode,
1905                      enum rte_eth_nb_tcs num_tcs,
1906                      uint8_t pfc_en)
1907 {
1908         struct rte_eth_conf port_conf;
1909         struct rte_eth_dev_info dev_info;
1910         struct rte_port *rte_port;
1911         int retval;
1912         uint16_t i;
1913
1914         rte_eth_dev_info_get(pid, &dev_info);
1915
1916         /* If dev_info.vmdq_pool_base is greater than 0,
1917          * the queue IDs of the VMDq pools start after the PF queues.
1918          */
1919         if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
1920                 printf("VMDQ_DCB multi-queue mode is nonsensical"
1921                         " for port %d.\n", pid);
1922                 return -1;
1923         }
1924
1925         /* Assume the ports in testpmd have the same DCB capability
1926          * and the same number of rxq and txq in DCB mode
1927          */
1928         if (dcb_mode == DCB_VT_ENABLED) {
1929                 nb_rxq = dev_info.max_rx_queues;
1930                 nb_txq = dev_info.max_tx_queues;
1931         } else {
1932                 /* if VT is disabled, use all PF queues */
1933                 if (dev_info.vmdq_pool_base == 0) {
1934                         nb_rxq = dev_info.max_rx_queues;
1935                         nb_txq = dev_info.max_tx_queues;
1936                 } else {
1937                         nb_rxq = (queueid_t)num_tcs;
1938                         nb_txq = (queueid_t)num_tcs;
1939
1940                 }
1941         }
1942         rx_free_thresh = 64;
1943
1944         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1945         /* Enter DCB configuration status */
1946         dcb_config = 1;
1947
1948         /* set the configuration of DCB in VT mode and DCB in non-VT mode */
1949         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
1950         if (retval < 0)
1951                 return retval;
1952
1953         rte_port = &ports[pid];
1954         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1955
1956         rxtx_port_config(rte_port);
1957         /* VLAN filter */
1958         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1959         for (i = 0; i < RTE_DIM(vlan_tags); i++)
1960                 rx_vft_set(pid, vlan_tags[i], 1);
1961
1962         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1963         map_port_queue_stats_mapping_registers(pid, rte_port);
1964
1965         rte_port->dcb_flag = 1;
1966
1967         return 0;
1968 }
1969
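/*
 * Allocate the array of port data structures and mark the probed ports
 * as enabled.
 */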
1970 static void
1971 init_port(void)
1972 {
1973         portid_t pid;
1974
1975         /* Configuration of Ethernet ports. */
1976         ports = rte_zmalloc("testpmd: ports",
1977                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
1978                             RTE_CACHE_LINE_SIZE);
1979         if (ports == NULL) {
1980                 rte_exit(EXIT_FAILURE,
1981                                 "rte_zmalloc(%d struct rte_port) failed\n",
1982                                 RTE_MAX_ETHPORTS);
1983         }
1984
1985         /* enable allocated ports */
1986         for (pid = 0; pid < nb_ports; pid++)
1987                 ports[pid].enabled = 1;
1988 }
1989
1990 static void
1991 force_quit(void)
1992 {
1993         pmd_test_exit();
1994         prompt_exit();
1995 }
1996
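/*
 * On SIGINT/SIGTERM, shut testpmd down cleanly, then re-raise the signal
 * with the default handler so the process exits with the expected status.
 */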
1997 static void
1998 signal_handler(int signum)
1999 {
2000         if (signum == SIGINT || signum == SIGTERM) {
2001                 printf("\nSignal %d received, preparing to exit...\n",
2002                                 signum);
2003                 force_quit();
2004                 /* exit with the expected status */
2005                 signal(signum, SIG_DFL);
2006                 kill(getpid(), signum);
2007         }
2008 }
2009
2010 int
2011 main(int argc, char** argv)
2012 {
2013         int  diag;
2014         uint8_t port_id;
2015
2016         signal(SIGINT, signal_handler);
2017         signal(SIGTERM, signal_handler);
2018
2019         diag = rte_eal_init(argc, argv);
2020         if (diag < 0)
2021                 rte_panic("Cannot init EAL\n");
2022
2023         nb_ports = (portid_t) rte_eth_dev_count();
2024         if (nb_ports == 0)
2025                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2026
2027         /* allocate port structures, and init them */
2028         init_port();
2029
2030         set_def_fwd_config();
2031         if (nb_lcores == 0)
2032                 rte_panic("Empty set of forwarding logical cores - check the "
2033                           "core mask supplied in the command parameters\n");
2034
2035         argc -= diag;
2036         argv += diag;
2037         if (argc > 1)
2038                 launch_args_parse(argc, argv);
2039
2040         if (nb_rxq > nb_txq)
2041                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2042                        "but nb_txq=%d will prevent it from being fully tested.\n",
2043                        nb_rxq, nb_txq);
2044
2045         init_config();
2046         if (start_port(RTE_PORT_ALL) != 0)
2047                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2048
2049         /* set all ports to promiscuous mode by default */
2050         FOREACH_PORT(port_id, ports)
2051                 rte_eth_promiscuous_enable(port_id);
2052
2053 #ifdef RTE_LIBRTE_CMDLINE
2054         if (interactive == 1) {
2055                 if (auto_start) {
2056                         printf("Start automatic packet forwarding\n");
2057                         start_packet_forwarding(0);
2058                 }
2059                 prompt();
2060         } else
2061 #endif
2062         {
2063                 char c;
2064                 int rc;
2065
2066                 printf("No commandline core given, start packet forwarding\n");
2067                 start_packet_forwarding(0);
2068                 printf("Press enter to exit\n");
2069                 rc = read(0, &c, 1);
2070                 pmd_test_exit();
2071                 if (rc < 0)
2072                         return 1;
2073         }
2074
2075         return 0;
2076 }