1f2445e4ca7b720586b222d20b69989b862a11f8
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_eal.h>
61 #include <rte_per_lcore.h>
62 #include <rte_lcore.h>
63 #include <rte_atomic.h>
64 #include <rte_branch_prediction.h>
65 #include <rte_ring.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
68 #include <rte_mbuf.h>
69 #include <rte_interrupts.h>
70 #include <rte_pci.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
73 #include <rte_dev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81
82 uint16_t verbose_level = 0; /**< Silent by default. */
83
84 /* use the master lcore for the command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
87
88 /*
89  * NUMA support configuration.
90  * When set, the NUMA support attempts to dispatch the allocation of the
91  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92  * probed ports among the CPU sockets 0 and 1.
93  * Otherwise, all memory is allocated from CPU socket 0.
94  */
95 uint8_t numa_support = 0; /**< No numa support by default */
96
97 /*
98  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99  * not configured.
100  */
101 uint8_t socket_num = UMA_NO_CONFIG;
102
103 /*
104  * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
105  */
106 uint8_t mp_anon = 0;
107
108 /*
109  * Record the Ethernet address of peer target ports to which packets are
110  * forwarded.
111  * Must be initialized with the Ethernet addresses of peer traffic generator
112  * ports.
113  */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
116
117 /*
118  * Probed Target Environment.
119  */
120 struct rte_port *ports;        /**< For all probed ethernet ports. */
121 portid_t nb_ports;             /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124
125 /*
126  * Test Forwarding Configuration.
127  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129  */
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
134
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
140
141 /*
142  * Forwarding engines.
143  */
144 struct fwd_engine * fwd_engines[] = {
145         &io_fwd_engine,
146         &mac_fwd_engine,
147         &mac_retry_fwd_engine,
148         &mac_swap_engine,
149         &flow_gen_engine,
150         &rx_only_engine,
151         &tx_only_engine,
152         &csum_fwd_engine,
153         &icmp_echo_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155         &ieee1588_fwd_engine,
156 #endif
157         NULL,
158 };
159
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
162
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
165                                       * specified on command-line. */
166
167 /*
168  * Configuration of packet segments used by the "txonly" processing engine.
169  */
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172         TXONLY_DEF_PACKET_LEN,
173 };
174 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
175
176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
177 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
178
179 /* whether the current configuration is in DCB mode; 0 means not in DCB mode */
180 uint8_t dcb_config = 0;
181
182 /* Whether DCB is currently being tested */
183 uint8_t dcb_test = 0;
184
185 /* DCB with VT enabled is the default queue mapping mode */
186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
187
188 /*
189  * Configurable number of RX/TX queues.
190  */
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193
194 /*
195  * Configurable number of RX/TX ring descriptors.
196  */
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
201
202 #define RTE_PMD_PARAM_UNSET -1
203 /*
204  * Configurable values of RX and TX ring threshold registers.
205  */
206
207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
210
211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
214
215 /*
216  * Configurable value of RX free threshold.
217  */
218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
219
220 /*
221  * Configurable value of RX drop enable.
222  */
223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
224
225 /*
226  * Configurable value of TX free threshold.
227  */
228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
229
230 /*
231  * Configurable value of TX RS bit threshold.
232  */
233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
234
235 /*
236  * Configurable value of TX queue flags.
237  */
238 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
239
240 /*
241  * Receive Side Scaling (RSS) configuration.
242  */
243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
244
245 /*
246  * Port topology configuration
247  */
248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
249
250 /*
251  * Avoid flushing all the RX queues before forwarding starts.
252  */
253 uint8_t no_flush_rx = 0; /* flush by default */
254
255 /*
256  * Avoid checking the link status when starting/stopping a port.
257  */
258 uint8_t no_link_check = 0; /* check by default */
259
260 /*
261  * NIC bypass mode configuration options.
262  */
263 #ifdef RTE_NIC_BYPASS
264
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
267
268 #endif
269
270 /*
271  * Ethernet device configuration.
272  */
273 struct rte_eth_rxmode rx_mode = {
274         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
275         .split_hdr_size = 0,
276         .header_split   = 0, /**< Header Split disabled. */
277         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
280         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
282         .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
283 };
284
285 struct rte_fdir_conf fdir_conf = {
286         .mode = RTE_FDIR_MODE_NONE,
287         .pballoc = RTE_FDIR_PBALLOC_64K,
288         .status = RTE_FDIR_REPORT_STATUS,
289         .mask = {
290                 .vlan_tci_mask = 0x0,
291                 .ipv4_mask     = {
292                         .src_ip = 0xFFFFFFFF,
293                         .dst_ip = 0xFFFFFFFF,
294                 },
295                 .ipv6_mask     = {
296                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
297                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
298                 },
299                 .src_port_mask = 0xFFFF,
300                 .dst_port_mask = 0xFFFF,
301         },
302         .drop_queue = 127,
303 };
304
305 volatile int test_done = 1; /* packet forwarding is stopped when set to 1. */
306
307 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
308 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
309
310 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
311 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
312
313 uint16_t nb_tx_queue_stats_mappings = 0;
314 uint16_t nb_rx_queue_stats_mappings = 0;
315
316 /* Forward function declarations */
317 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
318 static void check_all_ports_link_status(uint32_t port_mask);
319
320 /*
321  * Check if all the ports are started.
322  * If yes, return positive value. If not, return zero.
323  */
324 static int all_ports_started(void);
325
326 /*
327  * Find next enabled port
328  */
329 portid_t
330 find_next_port(portid_t p, struct rte_port *ports, int size)
331 {
332         if (ports == NULL)
333                 rte_exit(-EINVAL, "failed to find a next port id\n");
334
335         while ((p < size) && (ports[p].enabled == 0))
336                 p++;
337         return p;
338 }
339
340 /*
341  * Set up the default configuration of forwarding lcores.
342  */
343 static void
344 set_default_fwd_lcores_config(void)
345 {
346         unsigned int i;
347         unsigned int nb_lc;
348
349         nb_lc = 0;
350         for (i = 0; i < RTE_MAX_LCORE; i++) {
351                 if (! rte_lcore_is_enabled(i))
352                         continue;
353                 if (i == rte_get_master_lcore())
354                         continue;
355                 fwd_lcores_cpuids[nb_lc++] = i;
356         }
357         nb_lcores = (lcoreid_t) nb_lc;
358         nb_cfg_lcores = nb_lcores;
359         nb_fwd_lcores = 1;
360 }
361
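/*
 * Set default peer Ethernet addresses: locally administered MAC addresses
 * whose last byte is the peer port index.
 */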
362 static void
363 set_def_peer_eth_addrs(void)
364 {
365         portid_t i;
366
367         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
368                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
369                 peer_eth_addrs[i].addr_bytes[5] = i;
370         }
371 }
372
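/*
 * By default, use every probed port, in port id order, as a forwarding port.
 */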
373 static void
374 set_default_fwd_ports_config(void)
375 {
376         portid_t pt_id;
377
378         for (pt_id = 0; pt_id < nb_ports; pt_id++)
379                 fwd_ports_ids[pt_id] = pt_id;
380
381         nb_cfg_ports = nb_ports;
382         nb_fwd_ports = nb_ports;
383 }
384
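/*
 * Set the default forwarding configuration: lcores, peer addresses and ports.
 */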
385 void
386 set_def_fwd_config(void)
387 {
388         set_default_fwd_lcores_config();
389         set_def_peer_eth_addrs();
390         set_default_fwd_ports_config();
391 }
392
393 /*
394  * Create a pool of mbufs on the given socket. Done once at init time.
395  */
396 static void
397 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
398                  unsigned int socket_id)
399 {
400         char pool_name[RTE_MEMPOOL_NAMESIZE];
401         struct rte_mempool *rte_mp;
402         uint32_t mb_size;
403
404         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
405         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
406
407 #ifdef RTE_LIBRTE_PMD_XENVIRT
408         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
409                                    (unsigned) mb_mempool_cache,
410                                    sizeof(struct rte_pktmbuf_pool_private),
411                                    rte_pktmbuf_pool_init, NULL,
412                                    rte_pktmbuf_init, NULL,
413                                    socket_id, 0);
414
415
416
417 #else
418         if (mp_anon != 0)
419                 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
420                                     (unsigned) mb_mempool_cache,
421                                     sizeof(struct rte_pktmbuf_pool_private),
422                                     rte_pktmbuf_pool_init, NULL,
423                                     rte_pktmbuf_init, NULL,
424                                     socket_id, 0);
425         else
426                 rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
427                                     (unsigned) mb_mempool_cache,
428                                     sizeof(struct rte_pktmbuf_pool_private),
429                                     rte_pktmbuf_pool_init, NULL,
430                                     rte_pktmbuf_init, NULL,
431                                     socket_id, 0);
432
433 #endif
434
435         if (rte_mp == NULL) {
436                 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
437                                                 "failed\n", socket_id);
438         } else if (verbose_level > 0) {
439                 rte_mempool_dump(stdout, rte_mp);
440         }
441 }
442
443 /*
444  * Check whether the given socket id is valid in NUMA mode.
445  * Return 0 if valid, -1 otherwise.
446  */
447 static int
448 check_socket_id(const unsigned int socket_id)
449 {
450         static int warning_once = 0;
451
452         if (socket_id >= MAX_SOCKET) {
453                 if (!warning_once && numa_support)
454                         printf("Warning: NUMA should be configured manually by"
455                                " using --port-numa-config and"
456                                " --ring-numa-config parameters along with"
457                                " --numa.\n");
458                 warning_once = 1;
459                 return -1;
460         }
461         return 0;
462 }
463
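/*
 * Global configuration done once at startup: allocate the forwarding lcores,
 * create the mbuf pool(s), initialize the port configuration and set up the
 * forwarding streams.
 */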
464 static void
465 init_config(void)
466 {
467         portid_t pid;
468         struct rte_port *port;
469         struct rte_mempool *mbp;
470         unsigned int nb_mbuf_per_pool;
471         lcoreid_t  lc_id;
472         uint8_t port_per_socket[MAX_SOCKET];
473
474         memset(port_per_socket,0,MAX_SOCKET);
475         /* Configuration of logical cores. */
476         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
477                                 sizeof(struct fwd_lcore *) * nb_lcores,
478                                 RTE_CACHE_LINE_SIZE);
479         if (fwd_lcores == NULL) {
480                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
481                                                         "failed\n", nb_lcores);
482         }
483         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
484                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
485                                                sizeof(struct fwd_lcore),
486                                                RTE_CACHE_LINE_SIZE);
487                 if (fwd_lcores[lc_id] == NULL) {
488                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
489                                                                 "failed\n");
490                 }
491                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
492         }
493
494         /*
495          * Create pools of mbufs.
496          * If NUMA support is disabled, create a single pool of mbufs in
497          * socket 0 memory by default.
498          * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
499          *
500          * Size the pools with the maximum values of nb_rxd and nb_txd so that
501          * nb_rxd and nb_txd can still be reconfigured at run time.
502          */
503         if (param_total_num_mbufs)
504                 nb_mbuf_per_pool = param_total_num_mbufs;
505         else {
506                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
507                                 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
508
509                 if (!numa_support)
510                         nb_mbuf_per_pool =
511                                 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
512         }
513
514         if (!numa_support) {
515                 if (socket_num == UMA_NO_CONFIG)
516                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
517                 else
518                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
519                                                  socket_num);
520         }
521
522         FOREACH_PORT(pid, ports) {
523                 port = &ports[pid];
524                 rte_eth_dev_info_get(pid, &port->dev_info);
525
526                 if (numa_support) {
527                         if (port_numa[pid] != NUMA_NO_CONFIG)
528                                 port_per_socket[port_numa[pid]]++;
529                         else {
530                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
531
532                                 /* if socket_id is invalid, set to 0 */
533                                 if (check_socket_id(socket_id) < 0)
534                                         socket_id = 0;
535                                 port_per_socket[socket_id]++;
536                         }
537                 }
538
539                 /* set flag to initialize port/queue */
540                 port->need_reconfig = 1;
541                 port->need_reconfig_queues = 1;
542         }
543
544         if (numa_support) {
545                 uint8_t i;
546                 unsigned int nb_mbuf;
547
548                 if (param_total_num_mbufs)
549                         nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
550
551                 for (i = 0; i < MAX_SOCKET; i++) {
552                         nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
553                         if (nb_mbuf)
554                                 mbuf_pool_create(mbuf_data_size,
555                                                 nb_mbuf,i);
556                 }
557         }
558         init_port_config();
559
560         /*
561          * Record which mbuf pool each logical core should use, if needed.
562          */
563         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
564                 mbp = mbuf_pool_find(
565                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
566
567                 if (mbp == NULL)
568                         mbp = mbuf_pool_find(0);
569                 fwd_lcores[lc_id]->mbp = mbp;
570         }
571
572         /* Configuration of packet forwarding streams. */
573         if (init_fwd_streams() < 0)
574                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
575 }
576
577
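/*
 * Update the device information of a port, mark it for reconfiguration at the
 * next start and refresh the port configuration.
 */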
578 void
579 reconfig(portid_t new_port_id, unsigned socket_id)
580 {
581         struct rte_port *port;
582
583         /* Reconfiguration of Ethernet ports. */
584         port = &ports[new_port_id];
585         rte_eth_dev_info_get(new_port_id, &port->dev_info);
586
587         /* set flag to initialize port/queue */
588         port->need_reconfig = 1;
589         port->need_reconfig_queues = 1;
590         port->socket_id = socket_id;
591
592         init_port_config();
593 }
594
595
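/*
 * Set the socket id of each port, then (re)allocate one forwarding stream per
 * RX queue of each port. Return 0 on success, -1 if nb_rxq or nb_txq exceeds
 * a device limit.
 */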
596 int
597 init_fwd_streams(void)
598 {
599         portid_t pid;
600         struct rte_port *port;
601         streamid_t sm_id, nb_fwd_streams_new;
602
603         /* set the socket id of each port according to the NUMA configuration */
604         FOREACH_PORT(pid, ports) {
605                 port = &ports[pid];
606                 if (nb_rxq > port->dev_info.max_rx_queues) {
607                         printf("Fail: nb_rxq(%d) is greater than "
608                                 "max_rx_queues(%d)\n", nb_rxq,
609                                 port->dev_info.max_rx_queues);
610                         return -1;
611                 }
612                 if (nb_txq > port->dev_info.max_tx_queues) {
613                         printf("Fail: nb_txq(%d) is greater than "
614                                 "max_tx_queues(%d)\n", nb_txq,
615                                 port->dev_info.max_tx_queues);
616                         return -1;
617                 }
618                 if (numa_support) {
619                         if (port_numa[pid] != NUMA_NO_CONFIG)
620                                 port->socket_id = port_numa[pid];
621                         else {
622                                 port->socket_id = rte_eth_dev_socket_id(pid);
623
624                                 /* if socket_id is invalid, set to 0 */
625                                 if (check_socket_id(port->socket_id) < 0)
626                                         port->socket_id = 0;
627                         }
628                 }
629                 else {
630                         if (socket_num == UMA_NO_CONFIG)
631                                 port->socket_id = 0;
632                         else
633                                 port->socket_id = socket_num;
634                 }
635         }
636
637         nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
638         if (nb_fwd_streams_new == nb_fwd_streams)
639                 return 0;
640         /* clear the old */
641         if (fwd_streams != NULL) {
642                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
643                         if (fwd_streams[sm_id] == NULL)
644                                 continue;
645                         rte_free(fwd_streams[sm_id]);
646                         fwd_streams[sm_id] = NULL;
647                 }
648                 rte_free(fwd_streams);
649                 fwd_streams = NULL;
650         }
651
652         /* init new */
653         nb_fwd_streams = nb_fwd_streams_new;
654         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
655                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
656         if (fwd_streams == NULL)
657                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
658                                                 "failed\n", nb_fwd_streams);
659
660         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
661                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
662                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
663                 if (fwd_streams[sm_id] == NULL)
664                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
665                                                                 " failed\n");
666         }
667
668         return 0;
669 }
670
671 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
672 static void
673 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
674 {
675         unsigned int total_burst;
676         unsigned int nb_burst;
677         unsigned int burst_stats[3];
678         uint16_t pktnb_stats[3];
679         uint16_t nb_pkt;
680         int burst_percent[3];
681
682         /*
683          * First compute the total number of packet bursts and the
684          * two highest numbers of bursts of the same number of packets.
685          */
686         total_burst = 0;
687         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
688         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
689         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
690                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
691                 if (nb_burst == 0)
692                         continue;
693                 total_burst += nb_burst;
694                 if (nb_burst > burst_stats[0]) {
695                         burst_stats[1] = burst_stats[0];
696                         pktnb_stats[1] = pktnb_stats[0];
697                         burst_stats[0] = nb_burst;
698                         pktnb_stats[0] = nb_pkt;
699                 } else if (nb_burst > burst_stats[1]) {
                        burst_stats[1] = nb_burst;
                        pktnb_stats[1] = nb_pkt;
                }
700         }
701         if (total_burst == 0)
702                 return;
703         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
704         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
705                burst_percent[0], (int) pktnb_stats[0]);
706         if (burst_stats[0] == total_burst) {
707                 printf("]\n");
708                 return;
709         }
710         if (burst_stats[0] + burst_stats[1] == total_burst) {
711                 printf(" + %d%% of %d pkts]\n",
712                        100 - burst_percent[0], pktnb_stats[1]);
713                 return;
714         }
715         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
716         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
717         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
718                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
719                 return;
720         }
721         printf(" + %d%% of %d pkts + %d%% of others]\n",
722                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
723 }
724 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
725
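/*
 * Display the forwarding statistics of one port. Per-queue statistics are
 * shown when queue stats mapping is enabled.
 */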
726 static void
727 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
728 {
729         struct rte_port *port;
730         uint8_t i;
731
732         static const char *fwd_stats_border = "----------------------";
733
734         port = &ports[port_id];
735         printf("\n  %s Forward statistics for port %-2d %s\n",
736                fwd_stats_border, port_id, fwd_stats_border);
737
738         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
739                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
740                        "%-"PRIu64"\n",
741                        stats->ipackets, stats->imissed,
742                        (uint64_t) (stats->ipackets + stats->imissed));
743
744                 if (cur_fwd_eng == &csum_fwd_engine)
745                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
746                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
747                 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
748                         printf("  RX-badcrc:  %-14"PRIu64" RX-badlen:  %-14"PRIu64
749                                "RX-error: %-"PRIu64"\n",
750                                stats->ibadcrc, stats->ibadlen, stats->ierrors);
751                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
752                 }
753
754                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
755                        "%-"PRIu64"\n",
756                        stats->opackets, port->tx_dropped,
757                        (uint64_t) (stats->opackets + port->tx_dropped));
758         }
759         else {
760                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
761                        "%14"PRIu64"\n",
762                        stats->ipackets, stats->imissed,
763                        (uint64_t) (stats->ipackets + stats->imissed));
764
765                 if (cur_fwd_eng == &csum_fwd_engine)
766                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
767                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
768                 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
769                         printf("  RX-badcrc:              %14"PRIu64"    RX-badlen: %14"PRIu64
770                                "    RX-error:%"PRIu64"\n",
771                                stats->ibadcrc, stats->ibadlen, stats->ierrors);
772                         printf("  RX-nombufs:             %14"PRIu64"\n",
773                                stats->rx_nombuf);
774                 }
775
776                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
777                        "%14"PRIu64"\n",
778                        stats->opackets, port->tx_dropped,
779                        (uint64_t) (stats->opackets + port->tx_dropped));
780         }
781
782         /* Display statistics of XON/XOFF pause frames, if any. */
783         if ((stats->tx_pause_xon  | stats->rx_pause_xon |
784              stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
785                 printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
786                        stats->rx_pause_xoff, stats->rx_pause_xon);
787                 printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
788                        stats->tx_pause_xoff, stats->tx_pause_xon);
789         }
790
791 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
792         if (port->rx_stream)
793                 pkt_burst_stats_display("RX",
794                         &port->rx_stream->rx_burst_stats);
795         if (port->tx_stream)
796                 pkt_burst_stats_display("TX",
797                         &port->tx_stream->tx_burst_stats);
798 #endif
799         /* flow director statistics, if enabled */
800         if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
801                 printf("  Fdirmiss:%14"PRIu64"    Fdirmatch:%14"PRIu64"\n",
802                        stats->fdirmiss,
803                        stats->fdirmatch);
804
805         if (port->rx_queue_stats_mapping_enabled) {
806                 printf("\n");
807                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
808                         printf("  Stats reg %2d RX-packets:%14"PRIu64
809                                "     RX-errors:%14"PRIu64
810                                "    RX-bytes:%14"PRIu64"\n",
811                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
812                 }
813                 printf("\n");
814         }
815         if (port->tx_queue_stats_mapping_enabled) {
816                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
817                         printf("  Stats reg %2d TX-packets:%14"PRIu64
818                                "                                 TX-bytes:%14"PRIu64"\n",
819                                i, stats->q_opackets[i], stats->q_obytes[i]);
820                 }
821         }
822
823         printf("  %s--------------------------------%s\n",
824                fwd_stats_border, fwd_stats_border);
825 }
826
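/*
 * Display the statistics of one forwarding stream, if it handled any packets.
 */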
827 static void
828 fwd_stream_stats_display(streamid_t stream_id)
829 {
830         struct fwd_stream *fs;
831         static const char *fwd_top_stats_border = "-------";
832
833         fs = fwd_streams[stream_id];
834         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
835             (fs->fwd_dropped == 0))
836                 return;
837         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
838                "TX Port=%2d/Queue=%2d %s\n",
839                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
840                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
841         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
842                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
843
844         /* if running the checksum forwarding engine, show checksum errors */
845         if (cur_fwd_eng == &csum_fwd_engine) {
846                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
847                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
848         }
849
850 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
851         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
852         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
853 #endif
854 }
855
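/*
 * Drain and free all packets pending in the RX queues of the forwarding
 * ports, in two passes separated by a 10 ms delay to catch packets still
 * in flight.
 */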
856 static void
857 flush_fwd_rx_queues(void)
858 {
859         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
860         portid_t  rxp;
861         portid_t port_id;
862         queueid_t rxq;
863         uint16_t  nb_rx;
864         uint16_t  i;
865         uint8_t   j;
866
867         for (j = 0; j < 2; j++) {
868                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
869                         for (rxq = 0; rxq < nb_rxq; rxq++) {
870                                 port_id = fwd_ports_ids[rxp];
871                                 do {
872                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
873                                                 pkts_burst, MAX_PKT_BURST);
874                                         for (i = 0; i < nb_rx; i++)
875                                                 rte_pktmbuf_free(pkts_burst[i]);
876                                 } while (nb_rx > 0);
877                         }
878                 }
879                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
880         }
881 }
882
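/*
 * Run the given packet forwarding function on all streams assigned to a
 * forwarding lcore until the lcore is told to stop.
 */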
883 static void
884 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
885 {
886         struct fwd_stream **fsm;
887         streamid_t nb_fs;
888         streamid_t sm_id;
889
890         fsm = &fwd_streams[fc->stream_idx];
891         nb_fs = fc->stream_nb;
892         do {
893                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
894                         (*pkt_fwd)(fsm[sm_id]);
895         } while (! fc->stopped);
896 }
897
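/*
 * Entry point of a forwarding lcore: forward packets with the packet
 * forwarding function of the current forwarding engine.
 */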
898 static int
899 start_pkt_forward_on_core(void *fwd_arg)
900 {
901         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
902                              cur_fwd_config.fwd_eng->packet_fwd);
903         return 0;
904 }
905
906 /*
907  * Run the TXONLY packet forwarding engine to send a single burst of packets.
908  * Used to start communication flows in network loopback test configurations.
909  */
910 static int
911 run_one_txonly_burst_on_core(void *fwd_arg)
912 {
913         struct fwd_lcore *fwd_lc;
914         struct fwd_lcore tmp_lcore;
915
916         fwd_lc = (struct fwd_lcore *) fwd_arg;
917         tmp_lcore = *fwd_lc;
918         tmp_lcore.stopped = 1;
919         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
920         return 0;
921 }
922
923 /*
924  * Launch packet forwarding:
925  *     - Setup per-port forwarding context.
926  *     - launch logical cores with their forwarding configuration.
927  */
928 static void
929 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
930 {
931         port_fwd_begin_t port_fwd_begin;
932         unsigned int i;
933         unsigned int lc_id;
934         int diag;
935
936         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
937         if (port_fwd_begin != NULL) {
938                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
939                         (*port_fwd_begin)(fwd_ports_ids[i]);
940         }
941         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
942                 lc_id = fwd_lcores_cpuids[i];
943                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
944                         fwd_lcores[i]->stopped = 0;
945                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
946                                                      fwd_lcores[i], lc_id);
947                         if (diag != 0)
948                                 printf("launch lcore %u failed - diag=%d\n",
949                                        lc_id, diag);
950                 }
951         }
952 }
953
954 /*
955  * Launch packet forwarding configuration.
956  */
957 void
958 start_packet_forwarding(int with_tx_first)
959 {
960         port_fwd_begin_t port_fwd_begin;
961         port_fwd_end_t  port_fwd_end;
962         struct rte_port *port;
963         unsigned int i;
964         portid_t   pt_id;
965         streamid_t sm_id;
966
967         if (all_ports_started() == 0) {
968                 printf("Not all ports were started\n");
969                 return;
970         }
971         if (test_done == 0) {
972                 printf("Packet forwarding already started\n");
973                 return;
974         }
975         if(dcb_test) {
976                 for (i = 0; i < nb_fwd_ports; i++) {
977                         pt_id = fwd_ports_ids[i];
978                         port = &ports[pt_id];
979                         if (!port->dcb_flag) {
980                                 printf("In DCB mode, all forwarding ports must "
981                                        "be configured in this mode.\n");
982                                 return;
983                         }
984                 }
985                 if (nb_fwd_lcores == 1) {
986                         printf("In DCB mode, the number of forwarding cores "
987                                "should be larger than 1.\n");
988                         return;
989                 }
990         }
991         test_done = 0;
992
993         if(!no_flush_rx)
994                 flush_fwd_rx_queues();
995
996         fwd_config_setup();
997         rxtx_config_display();
998
999         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1000                 pt_id = fwd_ports_ids[i];
1001                 port = &ports[pt_id];
1002                 rte_eth_stats_get(pt_id, &port->stats);
1003                 port->tx_dropped = 0;
1004
1005                 map_port_queue_stats_mapping_registers(pt_id, port);
1006         }
1007         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1008                 fwd_streams[sm_id]->rx_packets = 0;
1009                 fwd_streams[sm_id]->tx_packets = 0;
1010                 fwd_streams[sm_id]->fwd_dropped = 0;
1011                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1012                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1013
1014 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1015                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1016                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1017                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1018                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1019 #endif
1020 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1021                 fwd_streams[sm_id]->core_cycles = 0;
1022 #endif
1023         }
1024         if (with_tx_first) {
1025                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1026                 if (port_fwd_begin != NULL) {
1027                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1028                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1029                 }
1030                 launch_packet_forwarding(run_one_txonly_burst_on_core);
1031                 rte_eal_mp_wait_lcore();
1032                 port_fwd_end = tx_only_engine.port_fwd_end;
1033                 if (port_fwd_end != NULL) {
1034                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1035                                 (*port_fwd_end)(fwd_ports_ids[i]);
1036                 }
1037         }
1038         launch_packet_forwarding(start_pkt_forward_on_core);
1039 }
1040
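/*
 * Stop packet forwarding: tell the forwarding lcores to stop, wait for them
 * to finish, then collect and display per-stream, per-port and accumulated
 * statistics.
 */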
1041 void
1042 stop_packet_forwarding(void)
1043 {
1044         struct rte_eth_stats stats;
1045         struct rte_port *port;
1046         port_fwd_end_t  port_fwd_end;
1047         int i;
1048         portid_t   pt_id;
1049         streamid_t sm_id;
1050         lcoreid_t  lc_id;
1051         uint64_t total_recv;
1052         uint64_t total_xmit;
1053         uint64_t total_rx_dropped;
1054         uint64_t total_tx_dropped;
1055         uint64_t total_rx_nombuf;
1056         uint64_t tx_dropped;
1057         uint64_t rx_bad_ip_csum;
1058         uint64_t rx_bad_l4_csum;
1059 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1060         uint64_t fwd_cycles;
1061 #endif
1062         static const char *acc_stats_border = "+++++++++++++++";
1063
1064         if (all_ports_started() == 0) {
1065                 printf("Not all ports were started\n");
1066                 return;
1067         }
1068         if (test_done) {
1069                 printf("Packet forwarding not started\n");
1070                 return;
1071         }
1072         printf("Telling cores to stop...");
1073         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1074                 fwd_lcores[lc_id]->stopped = 1;
1075         printf("\nWaiting for lcores to finish...\n");
1076         rte_eal_mp_wait_lcore();
1077         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1078         if (port_fwd_end != NULL) {
1079                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1080                         pt_id = fwd_ports_ids[i];
1081                         (*port_fwd_end)(pt_id);
1082                 }
1083         }
1084 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1085         fwd_cycles = 0;
1086 #endif
1087         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1088                 if (cur_fwd_config.nb_fwd_streams >
1089                     cur_fwd_config.nb_fwd_ports) {
1090                         fwd_stream_stats_display(sm_id);
1091                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1092                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1093                 } else {
1094                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1095                                 fwd_streams[sm_id];
1096                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1097                                 fwd_streams[sm_id];
1098                 }
1099                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1100                 tx_dropped = (uint64_t) (tx_dropped +
1101                                          fwd_streams[sm_id]->fwd_dropped);
1102                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1103
1104                 rx_bad_ip_csum =
1105                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1106                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1107                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1108                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1109                                                         rx_bad_ip_csum;
1110
1111                 rx_bad_l4_csum =
1112                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1113                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1114                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1115                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1116                                                         rx_bad_l4_csum;
1117
1118 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1119                 fwd_cycles = (uint64_t) (fwd_cycles +
1120                                          fwd_streams[sm_id]->core_cycles);
1121 #endif
1122         }
1123         total_recv = 0;
1124         total_xmit = 0;
1125         total_rx_dropped = 0;
1126         total_tx_dropped = 0;
1127         total_rx_nombuf  = 0;
1128         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1129                 pt_id = fwd_ports_ids[i];
1130
1131                 port = &ports[pt_id];
1132                 rte_eth_stats_get(pt_id, &stats);
1133                 stats.ipackets -= port->stats.ipackets;
1134                 port->stats.ipackets = 0;
1135                 stats.opackets -= port->stats.opackets;
1136                 port->stats.opackets = 0;
1137                 stats.ibytes   -= port->stats.ibytes;
1138                 port->stats.ibytes = 0;
1139                 stats.obytes   -= port->stats.obytes;
1140                 port->stats.obytes = 0;
1141                 stats.imissed  -= port->stats.imissed;
1142                 port->stats.imissed = 0;
1143                 stats.oerrors  -= port->stats.oerrors;
1144                 port->stats.oerrors = 0;
1145                 stats.rx_nombuf -= port->stats.rx_nombuf;
1146                 port->stats.rx_nombuf = 0;
1147                 stats.fdirmatch -= port->stats.fdirmatch;
1148                 port->stats.fdirmatch = 0;
1149                 stats.fdirmiss -= port->stats.fdirmiss;
1150                 port->stats.fdirmiss = 0;
1151
1152                 total_recv += stats.ipackets;
1153                 total_xmit += stats.opackets;
1154                 total_rx_dropped += stats.imissed;
1155                 total_tx_dropped += port->tx_dropped;
1156                 total_rx_nombuf  += stats.rx_nombuf;
1157
1158                 fwd_port_stats_display(pt_id, &stats);
1159         }
1160         printf("\n  %s Accumulated forward statistics for all ports"
1161                "%s\n",
1162                acc_stats_border, acc_stats_border);
1163         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1164                "%-"PRIu64"\n"
1165                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1166                "%-"PRIu64"\n",
1167                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1168                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1169         if (total_rx_nombuf > 0)
1170                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1171         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1172                "%s\n",
1173                acc_stats_border, acc_stats_border);
1174 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1175         if (total_recv > 0)
1176                 printf("\n  CPU cycles/packet=%u (total cycles="
1177                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1178                        (unsigned int)(fwd_cycles / total_recv),
1179                        fwd_cycles, total_recv);
1180 #endif
1181         printf("\nDone.\n");
1182         test_done = 1;
1183 }
1184
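/*
 * Administratively bring the link of a port up.
 */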
1185 void
1186 dev_set_link_up(portid_t pid)
1187 {
1188         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1189                 printf("\nFailed to set link up.\n");
1190 }
1191
1192 void
1193 dev_set_link_down(portid_t pid)
1194 {
1195         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1196                 printf("\nFailed to set link down.\n");
1197 }
1198
1199 static int
1200 all_ports_started(void)
1201 {
1202         portid_t pi;
1203         struct rte_port *port;
1204
1205         FOREACH_PORT(pi, ports) {
1206                 port = &ports[pi];
1207                 /* Check if there is a port which is not started */
1208                 if (port->port_status != RTE_PORT_STARTED)
1209                         return 0;
1210         }
1211
1212         /* All ports are started */
1213         return 1;
1214 }
1215
1216 int
1217 all_ports_stopped(void)
1218 {
1219         portid_t pi;
1220         struct rte_port *port;
1221
1222         FOREACH_PORT(pi, ports) {
1223                 port = &ports[pi];
1224                 if (port->port_status != RTE_PORT_STOPPED)
1225                         return 0;
1226         }
1227
1228         return 1;
1229 }
1230
1231 int
1232 port_is_started(portid_t port_id)
1233 {
1234         if (port_id_is_invalid(port_id, ENABLED_WARN))
1235                 return 0;
1236
1237         if (ports[port_id].port_status != RTE_PORT_STARTED)
1238                 return 0;
1239
1240         return 1;
1241 }
1242
1243 static int
1244 port_is_closed(portid_t port_id)
1245 {
1246         if (port_id_is_invalid(port_id, ENABLED_WARN))
1247                 return 0;
1248
1249         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1250                 return 0;
1251
1252         return 1;
1253 }
1254
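/*
 * Start one port, or all ports when pid is RTE_PORT_ALL: configure the device
 * and its RX/TX queues if needed, start it and display its MAC address.
 * Return 0 on success, -1 on a configuration failure.
 */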
1255 int
1256 start_port(portid_t pid)
1257 {
1258         int diag, need_check_link_status = -1;
1259         portid_t pi;
1260         queueid_t qi;
1261         struct rte_port *port;
1262         struct ether_addr mac_addr;
1263
1264         if (test_done == 0) {
1265                 printf("Please stop forwarding first\n");
1266                 return -1;
1267         }
1268
1269         if (port_id_is_invalid(pid, ENABLED_WARN))
1270                 return 0;
1271
1272         if (init_fwd_streams() < 0) {
1273                 printf("Fail from init_fwd_streams()\n");
1274                 return -1;
1275         }
1276
1277         if(dcb_config)
1278                 dcb_test = 1;
1279         FOREACH_PORT(pi, ports) {
1280                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1281                         continue;
1282
1283                 need_check_link_status = 0;
1284                 port = &ports[pi];
1285                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1286                                                  RTE_PORT_HANDLING) == 0) {
1287                         printf("Port %d is not stopped\n", pi);
1288                         continue;
1289                 }
1290
1291                 if (port->need_reconfig > 0) {
1292                         port->need_reconfig = 0;
1293
1294                         printf("Configuring Port %d (socket %u)\n", pi,
1295                                         port->socket_id);
1296                         /* configure port */
1297                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1298                                                 &(port->dev_conf));
1299                         if (diag != 0) {
1300                                 if (rte_atomic16_cmpset(&(port->port_status),
1301                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1302                                         printf("Port %d can not be set back "
1303                                                         "to stopped\n", pi);
1304                                 printf("Fail to configure port %d\n", pi);
1305                                 /* try to reconfigure port next time */
1306                                 port->need_reconfig = 1;
1307                                 return -1;
1308                         }
1309                 }
1310                 if (port->need_reconfig_queues > 0) {
1311                         port->need_reconfig_queues = 0;
1312                         /* setup tx queues */
1313                         for (qi = 0; qi < nb_txq; qi++) {
1314                                 if ((numa_support) &&
1315                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1316                                         diag = rte_eth_tx_queue_setup(pi, qi,
1317                                                 nb_txd,txring_numa[pi],
1318                                                 &(port->tx_conf));
1319                                 else
1320                                         diag = rte_eth_tx_queue_setup(pi, qi,
1321                                                 nb_txd,port->socket_id,
1322                                                 &(port->tx_conf));
1323
1324                                 if (diag == 0)
1325                                         continue;
1326
1327                                 /* Fail to setup tx queue, return */
1328                                 if (rte_atomic16_cmpset(&(port->port_status),
1329                                                         RTE_PORT_HANDLING,
1330                                                         RTE_PORT_STOPPED) == 0)
1331                                         printf("Port %d can not be set back "
1332                                                         "to stopped\n", pi);
1333                                 printf("Fail to configure port %d tx queues\n", pi);
1334                                 /* try to reconfigure queues next time */
1335                                 port->need_reconfig_queues = 1;
1336                                 return -1;
1337                         }
1338                         /* setup rx queues */
1339                         for (qi = 0; qi < nb_rxq; qi++) {
1340                                 if ((numa_support) &&
1341                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1342                                         struct rte_mempool * mp =
1343                                                 mbuf_pool_find(rxring_numa[pi]);
1344                                         if (mp == NULL) {
1345                                                 printf("Failed to setup RX queue: "
1346                                                         "no mempool allocation "
1347                                                         "on socket %d\n",
1348                                                         rxring_numa[pi]);
1349                                                 return -1;
1350                                         }
1351
1352                                         diag = rte_eth_rx_queue_setup(pi, qi,
1353                                              nb_rxd,rxring_numa[pi],
1354                                              &(port->rx_conf),mp);
1355                                 }
1356                                 else
1357                                         diag = rte_eth_rx_queue_setup(pi, qi,
1358                                              nb_rxd,port->socket_id,
1359                                              &(port->rx_conf),
1360                                              mbuf_pool_find(port->socket_id));
1361
1362                                 if (diag == 0)
1363                                         continue;
1364
1365
1366                                 /* Fail to setup rx queue, return */
1367                                 if (rte_atomic16_cmpset(&(port->port_status),
1368                                                         RTE_PORT_HANDLING,
1369                                                         RTE_PORT_STOPPED) == 0)
1370                                         printf("Port %d can not be set back "
1371                                                         "to stopped\n", pi);
1372                                 printf("Fail to configure port %d rx queues\n", pi);
1373                                 /* try to reconfigure queues next time */
1374                                 port->need_reconfig_queues = 1;
1375                                 return -1;
1376                         }
1377                 }
1378                 /* start port */
1379                 if (rte_eth_dev_start(pi) < 0) {
1380                         printf("Fail to start port %d\n", pi);
1381
1382                         /* Failed to start the port, set it back to stopped */
1383                         if (rte_atomic16_cmpset(&(port->port_status),
1384                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1385                                 printf("Port %d can not be set back to "
1386                                                         "stopped\n", pi);
1387                         continue;
1388                 }
1389
1390                 if (rte_atomic16_cmpset(&(port->port_status),
1391                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1392                         printf("Port %d cannot be set to started\n", pi);
1393
1394                 rte_eth_macaddr_get(pi, &mac_addr);
1395                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1396                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1397                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1398                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1399
1400                 /* at least one port started, need to check link status */
1401                 need_check_link_status = 1;
1402         }
1403
1404         if (need_check_link_status == 1 && !no_link_check)
1405                 check_all_ports_link_status(RTE_PORT_ALL);
1406         else if (need_check_link_status == 0)
1407                 printf("Please stop the ports first\n");
1408
1409         printf("Done\n");
1410         return 0;
1411 }
1412
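/*
 * Stop the given port, or every port when pid == RTE_PORT_ALL.
 * Forwarding must be stopped first; each port moves from STARTED to
 * HANDLING while rte_eth_dev_stop() runs, and ends up as STOPPED.
 * Typically driven from the testpmd command line, e.g. "port stop all".
 */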
1413 void
1414 stop_port(portid_t pid)
1415 {
1416         portid_t pi;
1417         struct rte_port *port;
1418         int need_check_link_status = 0;
1419
1420         if (test_done == 0) {
1421                 printf("Please stop forwarding first\n");
1422                 return;
1423         }
1424         if (dcb_test) {
1425                 dcb_test = 0;
1426                 dcb_config = 0;
1427         }
1428
1429         if (port_id_is_invalid(pid, ENABLED_WARN))
1430                 return;
1431
1432         printf("Stopping ports...\n");
1433
1434         FOREACH_PORT(pi, ports) {
1435                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1436                         continue;
1437
1438                 port = &ports[pi];
1439                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1440                                                 RTE_PORT_HANDLING) == 0)
1441                         continue;
1442
1443                 rte_eth_dev_stop(pi);
1444
1445                 if (rte_atomic16_cmpset(&(port->port_status),
1446                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1447                         printf("Port %d cannot be set to stopped\n", pi);
1448                 need_check_link_status = 1;
1449         }
1450         if (need_check_link_status && !no_link_check)
1451                 check_all_ports_link_status(RTE_PORT_ALL);
1452
1453         printf("Done\n");
1454 }
1455
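/*
 * Close the given port, or every port when pid == RTE_PORT_ALL.
 * Only ports in the STOPPED state are closed; the state is moved to
 * HANDLING around rte_eth_dev_close() and ends up as CLOSED.
 */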
1456 void
1457 close_port(portid_t pid)
1458 {
1459         portid_t pi;
1460         struct rte_port *port;
1461
1462         if (test_done == 0) {
1463                 printf("Please stop forwarding first\n");
1464                 return;
1465         }
1466
1467         if (port_id_is_invalid(pid, ENABLED_WARN))
1468                 return;
1469
1470         printf("Closing ports...\n");
1471
1472         FOREACH_PORT(pi, ports) {
1473                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1474                         continue;
1475
1476                 port = &ports[pi];
1477                 if (rte_atomic16_cmpset(&(port->port_status),
1478                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1479                         printf("Port %d is not stopped\n", pi);
1480                         continue;
1481                 }
1482
1483                 rte_eth_dev_close(pi);
1484
1485                 if (rte_atomic16_cmpset(&(port->port_status),
1486                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1487                         printf("Port %d cannot be set to closed\n", pi);
1488         }
1489
1490         printf("Done\n");
1491 }
1492
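/*
 * Hot-plug a new port described by "identifier" (for a physical device this
 * is typically a PCI address such as "0000:02:00.0"; the exact format depends
 * on the bus and driver). The attached port is reconfigured on its own NUMA
 * socket, put in promiscuous mode and appended to the forwarding port list.
 */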
1493 void
1494 attach_port(char *identifier)
1495 {
1496         portid_t i, j, pi = 0;
1497
1498         printf("Attaching a new port...\n");
1499
1500         if (identifier == NULL) {
1501                 printf("No device identifier specified\n");
1502                 return;
1503         }
1504
1505         if (test_done == 0) {
1506                 printf("Please stop forwarding first\n");
1507                 return;
1508         }
1509
1510         if (rte_eth_dev_attach(identifier, &pi))
1511                 return;
1512
1513         ports[pi].enabled = 1;
1514         reconfig(pi, rte_eth_dev_socket_id(pi));
1515         rte_eth_promiscuous_enable(pi);
1516
1517         nb_ports = rte_eth_dev_count();
1518
1519         /* set_default_fwd_ports_config(); */
1520         bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
1521         i = 0;
1522         FOREACH_PORT(j, ports) {
1523                 fwd_ports_ids[i] = j;
1524                 i++;
1525         }
1526         nb_cfg_ports = nb_ports;
1527         nb_fwd_ports++;
1528
1529         ports[pi].port_status = RTE_PORT_STOPPED;
1530
1531         printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1532         printf("Done\n");
1533 }
1534
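/*
 * Hot-unplug the given port. The port must have been closed first; on
 * success it is removed from the forwarding configuration and the global
 * port count is refreshed from rte_eth_dev_count().
 */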
1535 void
1536 detach_port(uint8_t port_id)
1537 {
1538         portid_t i, pi = 0;
1539         char name[RTE_ETH_NAME_MAX_LEN];
1540
1541         printf("Detaching a port...\n");
1542
1543         if (!port_is_closed(port_id)) {
1544                 printf("Please close port first\n");
1545                 return;
1546         }
1547
1548         rte_eth_promiscuous_disable(port_id);
1549
1550         if (rte_eth_dev_detach(port_id, name))
1551                 return;
1552
1553         ports[port_id].enabled = 0;
1554         nb_ports = rte_eth_dev_count();
1555
1556         /* set_default_fwd_ports_config(); */
1557         bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
1558         i = 0;
1559         FOREACH_PORT(pi, ports) {
1560                 fwd_ports_ids[i] = pi;
1561                 i++;
1562         }
1563         nb_cfg_ports = nb_ports;
1564         nb_fwd_ports--;
1565
1566         printf("Port '%s' is detached. Total number of ports is now %d\n",
1567                         name, nb_ports);
1568         printf("Done\n");
1569         return;
1570 }
1571
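/* Shut down testpmd: stop packet forwarding if it is still running, then
 * close every probed port before exiting. */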
1572 void
1573 pmd_test_exit(void)
1574 {
1575         portid_t pt_id;
1576
1577         if (test_done == 0)
1578                 stop_packet_forwarding();
1579
1580         FOREACH_PORT(pt_id, ports) {
1581                 printf("Stopping port %d...", pt_id);
1582                 fflush(stdout);
1583                 rte_eth_dev_close(pt_id);
1584                 printf("done\n");
1585         }
1586         printf("bye...\n");
1587 }
1588
1589 typedef void (*cmd_func_t)(void);
1590 struct pmd_test_command {
1591         const char *cmd_name;
1592         cmd_func_t cmd_func;
1593 };
1594
1595 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1596
1597 /* Poll the link status of all ports for up to 9s, then print the final status of each port */
1598 static void
1599 check_all_ports_link_status(uint32_t port_mask)
1600 {
1601 #define CHECK_INTERVAL 100 /* 100ms */
1602 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1603         uint8_t portid, count, all_ports_up, print_flag = 0;
1604         struct rte_eth_link link;
1605
1606         printf("Checking link statuses...\n");
1607         fflush(stdout);
1608         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1609                 all_ports_up = 1;
1610                 FOREACH_PORT(portid, ports) {
1611                         if ((port_mask & (1 << portid)) == 0)
1612                                 continue;
1613                         memset(&link, 0, sizeof(link));
1614                         rte_eth_link_get_nowait(portid, &link);
1615                         /* print link status if flag set */
1616                         if (print_flag == 1) {
1617                                 if (link.link_status)
1618                                         printf("Port %d Link Up - speed %u "
1619                                                 "Mbps - %s\n", (uint8_t)portid,
1620                                                 (unsigned)link.link_speed,
1621                                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1622                                                         ("full-duplex") : ("half-duplex"));
1623                                 else
1624                                         printf("Port %d Link Down\n",
1625                                                 (uint8_t)portid);
1626                                 continue;
1627                         }
1628                         /* clear all_ports_up flag if any link down */
1629                         if (link.link_status == 0) {
1630                                 all_ports_up = 0;
1631                                 break;
1632                         }
1633                 }
1634                 /* after finally printing all link status, get out */
1635                 if (print_flag == 1)
1636                         break;
1637
1638                 if (all_ports_up == 0) {
1639                         fflush(stdout);
1640                         rte_delay_ms(CHECK_INTERVAL);
1641                 }
1642
1643                 /* set the print_flag if all ports up or timeout */
1644                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1645                         print_flag = 1;
1646                 }
1647         }
1648 }
1649
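/*
 * Apply the TX queue-to-statistics-counter mappings given on the command
 * line (the tx-queue-stats-mapping option) to the given port. Returns 0 on
 * success or the error code reported by the ethdev layer.
 */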
1650 static int
1651 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1652 {
1653         uint16_t i;
1654         int diag;
1655         uint8_t mapping_found = 0;
1656
1657         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1658                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1659                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1660                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1661                                         tx_queue_stats_mappings[i].queue_id,
1662                                         tx_queue_stats_mappings[i].stats_counter_id);
1663                         if (diag != 0)
1664                                 return diag;
1665                         mapping_found = 1;
1666                 }
1667         }
1668         if (mapping_found)
1669                 port->tx_queue_stats_mapping_enabled = 1;
1670         return 0;
1671 }
1672
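/* RX counterpart of set_tx_queue_stats_mapping_registers(), driven by the
 * rx-queue-stats-mapping command-line option. */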
1673 static int
1674 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1675 {
1676         uint16_t i;
1677         int diag;
1678         uint8_t mapping_found = 0;
1679
1680         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1681                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1682                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1683                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1684                                         rx_queue_stats_mappings[i].queue_id,
1685                                         rx_queue_stats_mappings[i].stats_counter_id);
1686                         if (diag != 0)
1687                                 return diag;
1688                         mapping_found = 1;
1689                 }
1690         }
1691         if (mapping_found)
1692                 port->rx_queue_stats_mapping_enabled = 1;
1693         return 0;
1694 }
1695
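/*
 * Program both the TX and RX queue stats mappings of a port. A PMD that does
 * not implement the feature (-ENOTSUP) simply has the mapping disabled; any
 * other error is treated as fatal.
 */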
1696 static void
1697 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1698 {
1699         int diag = 0;
1700
1701         diag = set_tx_queue_stats_mapping_registers(pi, port);
1702         if (diag != 0) {
1703                 if (diag == -ENOTSUP) {
1704                         port->tx_queue_stats_mapping_enabled = 0;
1705                         printf("TX queue stats mapping not supported on port id=%d\n", pi);
1706                 }
1707                 else
1708                         rte_exit(EXIT_FAILURE,
1709                                         "set_tx_queue_stats_mapping_registers "
1710                                         "failed for port id=%d diag=%d\n",
1711                                         pi, diag);
1712         }
1713
1714         diag = set_rx_queue_stats_mapping_registers(pi, port);
1715         if (diag != 0) {
1716                 if (diag == -ENOTSUP) {
1717                         port->rx_queue_stats_mapping_enabled = 0;
1718                         printf("RX queue stats mapping not supported on port id=%d\n", pi);
1719                 }
1720                 else
1721                         rte_exit(EXIT_FAILURE,
1722                                         "set_rx_queue_stats_mapping_registers "
1723                                         "failed for port id=%d diag=%d\n",
1724                                         pi, diag);
1725         }
1726 }
1727
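/*
 * Initialize the per-port RX/TX queue configuration from the PMD defaults
 * reported in dev_info, then override individual thresholds with any values
 * given on the command line (those left at RTE_PMD_PARAM_UNSET keep the PMD
 * default). For example, passing only the txfreet option presumably changes
 * just tx_conf.tx_free_thresh and leaves the remaining defaults untouched.
 */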
1728 static void
1729 rxtx_port_config(struct rte_port *port)
1730 {
1731         port->rx_conf = port->dev_info.default_rxconf;
1732         port->tx_conf = port->dev_info.default_txconf;
1733
1734         /* Check if any RX/TX parameters have been passed */
1735         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1736                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1737
1738         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1739                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1740
1741         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1742                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1743
1744         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1745                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1746
1747         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1748                 port->rx_conf.rx_drop_en = rx_drop_en;
1749
1750         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1751                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1752
1753         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1754                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1755
1756         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1757                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1758
1759         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1760                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1761
1762         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1763                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1764
1765         if (txq_flags != RTE_PMD_PARAM_UNSET)
1766                 port->tx_conf.txq_flags = txq_flags;
1767 }
1768
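/*
 * Build the default rte_eth_conf for every probed port: RSS is enabled
 * whenever more than one RX queue is requested, VMDq+RSS is selected when
 * the device exposes VFs, and the per-queue stats mappings are programmed.
 */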
1769 void
1770 init_port_config(void)
1771 {
1772         portid_t pid;
1773         struct rte_port *port;
1774
1775         FOREACH_PORT(pid, ports) {
1776                 port = &ports[pid];
1777                 port->dev_conf.rxmode = rx_mode;
1778                 port->dev_conf.fdir_conf = fdir_conf;
1779                 if (nb_rxq > 1) {
1780                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1781                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1782                 } else {
1783                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1784                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1785                 }
1786
1787                 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1788                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1789                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1790                         else
1791                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1792                 }
1793
1794                 if (port->dev_info.max_vfs != 0) {
1795                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1796                                 port->dev_conf.rxmode.mq_mode =
1797                                         ETH_MQ_RX_VMDQ_RSS;
1798                         else
1799                                 port->dev_conf.rxmode.mq_mode =
1800                                         ETH_MQ_RX_NONE;
1801
1802                         port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1803                 }
1804
1805                 rxtx_port_config(port);
1806
1807                 rte_eth_macaddr_get(pid, &port->eth_addr);
1808
1809                 map_port_queue_stats_mapping_registers(pid, port);
1810 #ifdef RTE_NIC_BYPASS
1811                 rte_eth_dev_bypass_init(pid);
1812 #endif
1813         }
1814 }
1815
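/* VLAN IDs used to build the VMDq+DCB pool mapping and the VLAN filter
 * table in the DCB configuration below. */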
1816 const uint16_t vlan_tags[] = {
1817                 0,  1,  2,  3,  4,  5,  6,  7,
1818                 8,  9, 10, 11,  12, 13, 14, 15,
1819                 16, 17, 18, 19, 20, 21, 22, 23,
1820                 24, 25, 26, 27, 28, 29, 30, 31
1821 };
1822
1823 static int
1824 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1825 {
1826         uint8_t i;
1827
1828         /*
1829          * Builds up the correct configuration for dcb+vt based on the vlan tags array
1830          * given above, and the number of traffic classes available for use.
1831          */
1832         if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1833                 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1834                 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1835
1836                 /* VMDQ+DCB RX and TX configurations */
1837                 vmdq_rx_conf.enable_default_pool = 0;
1838                 vmdq_rx_conf.default_pool = 0;
1839                 vmdq_rx_conf.nb_queue_pools =
1840                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1841                 vmdq_tx_conf.nb_queue_pools =
1842                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1843
1844                 vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1845                 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1846                         vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
1847                         vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1848                 }
1849                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1850                         vmdq_rx_conf.dcb_queue[i] = i;
1851                         vmdq_tx_conf.dcb_queue[i] = i;
1852                 }
1853
1854                 /* set the multi-queue mode of RX and TX to DCB */
1855                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1856                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1857                 if (dcb_conf->pfc_en)
1858                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1859                 else
1860                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1861
1862                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1863                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1864                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1865                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1866         }
1867         else {
1868                 struct rte_eth_dcb_rx_conf rx_conf;
1869                 struct rte_eth_dcb_tx_conf tx_conf;
1870
1871                 /* queue mapping configuration of DCB RX and TX */
1872                 if (dcb_conf->num_tcs == ETH_4_TCS)
1873                         dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1874                 else
1875                         dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1876
1877                 rx_conf.nb_tcs = dcb_conf->num_tcs;
1878                 tx_conf.nb_tcs = dcb_conf->num_tcs;
1879
1880                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1881                         rx_conf.dcb_queue[i] = i;
1882                         tx_conf.dcb_queue[i] = i;
1883                 }
1884                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1885                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1886                 if (dcb_conf->pfc_en)
1887                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1888                 else
1889                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1890
1891                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1892                                 sizeof(struct rte_eth_dcb_rx_conf)));
1893                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1894                                 sizeof(struct rte_eth_dcb_tx_conf)));
1895         }
1896
1897         return 0;
1898 }
1899
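/*
 * Switch a port to the DCB configuration described by dcb_conf: the number
 * of RX/TX queues is forced to 128, the rte_eth_conf is rebuilt by
 * get_eth_dcb_conf(), and hardware VLAN filtering is enabled with every
 * entry of vlan_tags[] added to the filter via rx_vft_set().
 */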
1900 int
1901 init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
1902 {
1903         struct rte_eth_conf port_conf;
1904         struct rte_port *rte_port;
1905         int retval;
1906         uint16_t nb_vlan;
1907         uint16_t i;
1908
1909         /* rxq and txq configuration in dcb mode */
1910         nb_rxq = 128;
1911         nb_txq = 128;
1912         rx_free_thresh = 64;
1913
1914         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1915         /* Enter DCB configuration status */
1916         dcb_config = 1;
1917
1918         nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1919         /* set the DCB configuration for both VT and non-VT modes */
1920         retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1921         if (retval < 0)
1922                 return retval;
1923
1924         rte_port = &ports[pid];
1925         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1926
1927         rxtx_port_config(rte_port);
1928         /* VLAN filter */
1929         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1930         for (i = 0; i < nb_vlan; i++){
1931         for (i = 0; i < nb_vlan; i++) {
1932         }
1933
1934         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1935         map_port_queue_stats_mapping_registers(pid, rte_port);
1936
1937         rte_port->dcb_flag = 1;
1938
1939         return 0;
1940 }
1941
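/* Allocate the ports[] array for RTE_MAX_ETHPORTS entries and mark the
 * first nb_ports entries (the probed devices) as enabled. */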
1942 static void
1943 init_port(void)
1944 {
1945         portid_t pid;
1946
1947         /* Configuration of Ethernet ports. */
1948         ports = rte_zmalloc("testpmd: ports",
1949                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
1950                             RTE_CACHE_LINE_SIZE);
1951         if (ports == NULL) {
1952                 rte_exit(EXIT_FAILURE,
1953                                 "rte_zmalloc(%d struct rte_port) failed\n",
1954                                 RTE_MAX_ETHPORTS);
1955         }
1956
1957         /* enable allocated ports */
1958         for (pid = 0; pid < nb_ports; pid++)
1959                 ports[pid].enabled = 1;
1960 }
1961
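/*
 * testpmd entry point: initialize the EAL, allocate the port array, parse
 * the application arguments, configure and start every port, and then hand
 * control either to the interactive prompt or to the packet forwarding loop.
 * A typical invocation (illustrative only; the available EAL and application
 * options depend on the build and platform) might look like:
 *
 *   ./testpmd -c 0x3 -n 4 -- -i --rxq=2 --txq=2
 */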
1962 int
1963 main(int argc, char** argv)
1964 {
1965         int  diag;
1966         uint8_t port_id;
1967
1968         diag = rte_eal_init(argc, argv);
1969         if (diag < 0)
1970                 rte_panic("Cannot init EAL\n");
1971
1972         nb_ports = (portid_t) rte_eth_dev_count();
1973         if (nb_ports == 0)
1974                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
1975
1976         /* allocate port structures, and init them */
1977         init_port();
1978
1979         set_def_fwd_config();
1980         if (nb_lcores == 0)
1981                 rte_panic("Empty set of forwarding logical cores - check the "
1982                           "core mask supplied in the command parameters\n");
1983
1984         argc -= diag;
1985         argv += diag;
1986         if (argc > 1)
1987                 launch_args_parse(argc, argv);
1988
1989         if (nb_rxq > nb_txq)
1990                 printf("Warning: nb_rxq=%d enables RSS configuration, "
1991                        "but nb_txq=%d will prevent RSS from being fully tested.\n",
1992                        nb_rxq, nb_txq);
1993
1994         init_config();
1995         if (start_port(RTE_PORT_ALL) != 0)
1996                 rte_exit(EXIT_FAILURE, "Failed to start ports\n");
1997
1998         /* set all ports to promiscuous mode by default */
1999         FOREACH_PORT(port_id, ports)
2000                 rte_eth_promiscuous_enable(port_id);
2001
2002 #ifdef RTE_LIBRTE_CMDLINE
2003         if (interactive == 1) {
2004                 if (auto_start) {
2005                         printf("Start automatic packet forwarding\n");
2006                         start_packet_forwarding(0);
2007                 }
2008                 prompt();
2009         } else
2010 #endif
2011         {
2012                 char c;
2013                 int rc;
2014
2015                 printf("No command line requested, starting packet forwarding\n");
2016                 start_packet_forwarding(0);
2017                 printf("Press enter to exit\n");
2018                 rc = read(0, &c, 1);
2019                 if (rc < 0)
2020                         return 1;
2021         }
2022
2023         return 0;
2024 }