app/testpmd: use default Rx/Tx port configuration
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81
82 uint16_t verbose_level = 0; /**< Silent by default. */
83
84 /* use master core for command line ? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
87
88 /*
89  * NUMA support configuration.
90  * When set, the NUMA support attempts to dispatch the allocation of the
91  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92  * probed ports among the CPU sockets 0 and 1.
93  * Otherwise, all memory is allocated from CPU socket 0.
94  */
95 uint8_t numa_support = 0; /**< No numa support by default */
96
97 /*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99  * not configured.
100  */
101 uint8_t socket_num = UMA_NO_CONFIG;
102
103 /*
104  * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
105  */
106 uint8_t mp_anon = 0;
107
108 /*
109  * Record the Ethernet address of peer target ports to which packets are
110  * forwarded.
 *   Must be instantiated with the Ethernet addresses of peer traffic generator
112  * ports.
113  */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
116
117 /*
118  * Probed Target Environment.
119  */
120 struct rte_port *ports;        /**< For all probed ethernet ports. */
121 portid_t nb_ports;             /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124
125 /*
126  * Test Forwarding Configuration.
127  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129  */
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
134
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
140
141 /*
142  * Forwarding engines.
143  */
/* NULL-terminated list of the forwarding engines that can be selected. */
struct fwd_engine * fwd_engines[] = {
        &io_fwd_engine,
        &mac_fwd_engine,
        &mac_retry_fwd_engine,
        &mac_swap_engine,
        &flow_gen_engine,
        &rx_only_engine,
        &tx_only_engine,
        &csum_fwd_engine,
        &icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
        &ieee1588_fwd_engine,
#endif
        NULL, /* terminator: iteration code stops at the first NULL entry */
};
159
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
162
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
165                                       * specified on command-line. */
166
167 /*
168  * Configuration of packet segments used by the "txonly" processing engine.
169  */
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172         TXONLY_DEF_PACKET_LEN,
173 };
174 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
175
176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
177 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
178
/* Whether the current configuration is in DCB mode; 0 means it is not. */
180 uint8_t dcb_config = 0;
181
182 /* Whether the dcb is in testing status */
183 uint8_t dcb_test = 0;
184
185 /* DCB on and VT on mapping is default */
186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
187
188 /*
189  * Configurable number of RX/TX queues.
190  */
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193
194 /*
195  * Configurable number of RX/TX ring descriptors.
196  */
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
201
202 #define RTE_PMD_PARAM_UNSET -1
203 /*
204  * Configurable values of RX and TX ring threshold registers.
205  */
206
207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
210
211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
214
215 /*
216  * Configurable value of RX free threshold.
217  */
218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
219
220 /*
221  * Configurable value of RX drop enable.
222  */
223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
224
225 /*
226  * Configurable value of TX free threshold.
227  */
228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
229
230 /*
231  * Configurable value of TX RS bit threshold.
232  */
233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
234
235 /*
236  * Configurable value of TX queue flags.
237  */
238 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
239
240 /*
241  * Receive Side Scaling (RSS) configuration.
242  */
243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
244
245 /*
246  * Port topology configuration
247  */
248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
249
250 /*
251  * Avoids to flush all the RX streams before starts forwarding.
252  */
253 uint8_t no_flush_rx = 0; /* flush by default */
254
255 /*
256  * Avoids to check link status when starting/stopping a port.
257  */
258 uint8_t no_link_check = 0; /* check by default */
259
260 /*
261  * NIC bypass mode configuration options.
262  */
263 #ifdef RTE_NIC_BYPASS
264
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
267
268 #endif
269
270 /*
271  * Ethernet device configuration.
272  */
/* Default RX mode: note that VLAN filtering and stripping are enabled. */
struct rte_eth_rxmode rx_mode = {
        .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
        .split_hdr_size = 0,
        .header_split   = 0, /**< Header Split disabled. */
        .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
        .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
        .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
        .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
        .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
        .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};
284
/* Flow Director configuration: disabled (RTE_FDIR_MODE_NONE) by default. */
struct rte_fdir_conf fdir_conf = {
        .mode = RTE_FDIR_MODE_NONE,
        .pballoc = RTE_FDIR_PBALLOC_64K,
        .status = RTE_FDIR_REPORT_STATUS,
        .flexbytes_offset = 0x6, /* offset of the flexible bytes in the packet */
        .drop_queue = 127,       /* queue receiving packets dropped by fdir */
};
292
293 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
294
295 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
296 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
297
298 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
299 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
300
301 uint16_t nb_tx_queue_stats_mappings = 0;
302 uint16_t nb_rx_queue_stats_mappings = 0;
303
304 /* Forward function declarations */
305 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
306 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
307
308 /*
309  * Check if all the ports are started.
310  * If yes, return positive value. If not, return zero.
311  */
312 static int all_ports_started(void);
313
314 /*
315  * Setup default configuration.
316  */
317 static void
318 set_default_fwd_lcores_config(void)
319 {
320         unsigned int i;
321         unsigned int nb_lc;
322
323         nb_lc = 0;
324         for (i = 0; i < RTE_MAX_LCORE; i++) {
325                 if (! rte_lcore_is_enabled(i))
326                         continue;
327                 if (i == rte_get_master_lcore())
328                         continue;
329                 fwd_lcores_cpuids[nb_lc++] = i;
330         }
331         nb_lcores = (lcoreid_t) nb_lc;
332         nb_cfg_lcores = nb_lcores;
333         nb_fwd_lcores = 1;
334 }
335
336 static void
337 set_def_peer_eth_addrs(void)
338 {
339         portid_t i;
340
341         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
342                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
343                 peer_eth_addrs[i].addr_bytes[5] = i;
344         }
345 }
346
347 static void
348 set_default_fwd_ports_config(void)
349 {
350         portid_t pt_id;
351
352         for (pt_id = 0; pt_id < nb_ports; pt_id++)
353                 fwd_ports_ids[pt_id] = pt_id;
354
355         nb_cfg_ports = nb_ports;
356         nb_fwd_ports = nb_ports;
357 }
358
/*
 * Reset the whole forwarding configuration to its default state:
 * lcores, peer Ethernet addresses and forwarding ports.
 */
void
set_def_fwd_config(void)
{
        set_default_fwd_lcores_config();
        set_def_peer_eth_addrs();
        set_default_fwd_ports_config();
}
366
367 /*
368  * Configuration initialisation done once at init time.
369  */
/* Per-mbuf constructor argument, consumed by testpmd_mbuf_ctor(). */
struct mbuf_ctor_arg {
        uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
        uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
};
374
/* Pool-level constructor argument, consumed by testpmd_mbuf_pool_ctor(). */
struct mbuf_pool_ctor_arg {
        uint16_t seg_buf_size; /**< size of data segment in mbuf. */
};
378
379 static void
380 testpmd_mbuf_ctor(struct rte_mempool *mp,
381                   void *opaque_arg,
382                   void *raw_mbuf,
383                   __attribute__((unused)) unsigned i)
384 {
385         struct mbuf_ctor_arg *mb_ctor_arg;
386         struct rte_mbuf    *mb;
387
388         mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
389         mb = (struct rte_mbuf *) raw_mbuf;
390
391         mb->pool         = mp;
392         mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
393         mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
394                         mb_ctor_arg->seg_buf_offset);
395         mb->buf_len      = mb_ctor_arg->seg_buf_size;
396         mb->ol_flags     = 0;
397         mb->data_off     = RTE_PKTMBUF_HEADROOM;
398         mb->nb_segs      = 1;
399         mb->tx_offload   = 0;
400         mb->vlan_tci     = 0;
401         mb->hash.rss     = 0;
402 }
403
404 static void
405 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
406                        void *opaque_arg)
407 {
408         struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
409         struct rte_pktmbuf_pool_private *mbp_priv;
410
411         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
412                 printf("%s(%s) private_data_size %d < %d\n",
413                        __func__, mp->name, (int) mp->private_data_size,
414                        (int) sizeof(struct rte_pktmbuf_pool_private));
415                 return;
416         }
417         mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
418         mbp_priv = rte_mempool_get_priv(mp);
419         mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
420 }
421
/*
 * Create the mbuf pool for one NUMA socket.
 *
 * mbuf_seg_size is the requested data room per mbuf (headroom excluded),
 * nb_mbuf the number of mbufs in the pool and socket_id the socket the
 * pool memory is allocated from. Exits the application on failure.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
                 unsigned int socket_id)
{
        char pool_name[RTE_MEMPOOL_NAMESIZE];
        struct rte_mempool *rte_mp;
        struct mbuf_pool_ctor_arg mbp_ctor_arg;
        struct mbuf_ctor_arg mb_ctor_arg;
        uint32_t mb_size;

        /* Data room = headroom + requested segment size. */
        mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
                                                mbuf_seg_size);
        /* Data buffer follows the mbuf header, cache-line aligned. */
        mb_ctor_arg.seg_buf_offset =
                (uint16_t) RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
        mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
        mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
        mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

#ifdef RTE_LIBRTE_PMD_XENVIRT
        /* Under Xen, mbufs come from grant-allocated memory. */
        rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
                                   (unsigned) mb_mempool_cache,
                                   sizeof(struct rte_pktmbuf_pool_private),
                                   testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
                                   testpmd_mbuf_ctor, &mb_ctor_arg,
                                   socket_id, 0);



#else
        /* Anonymous mapped memory (possibly not physically contiguous)
         * is used when mp_anon is set on the command line. */
        if (mp_anon != 0)
                rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
                                    (unsigned) mb_mempool_cache,
                                    sizeof(struct rte_pktmbuf_pool_private),
                                    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
                                    testpmd_mbuf_ctor, &mb_ctor_arg,
                                    socket_id, 0);
        else
                rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
                                    (unsigned) mb_mempool_cache,
                                    sizeof(struct rte_pktmbuf_pool_private),
                                    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
                                    testpmd_mbuf_ctor, &mb_ctor_arg,
                                    socket_id, 0);

#endif

        if (rte_mp == NULL) {
                rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
                                                "failed\n", socket_id);
        } else if (verbose_level > 0) {
                rte_mempool_dump(stdout, rte_mp);
        }
}
475
476 /*
477  * Check given socket id is valid or not with NUMA mode,
478  * if valid, return 0, else return -1
479  */
480 static int
481 check_socket_id(const unsigned int socket_id)
482 {
483         static int warning_once = 0;
484
485         if (socket_id >= MAX_SOCKET) {
486                 if (!warning_once && numa_support)
487                         printf("Warning: NUMA should be configured manually by"
488                                " using --port-numa-config and"
489                                " --ring-numa-config parameters along with"
490                                " --numa.\n");
491                 warning_once = 1;
492                 return -1;
493         }
494         return 0;
495 }
496
/*
 * One-time initialisation of the testpmd run-time configuration:
 * allocates the per-lcore contexts, the mbuf pool(s) (one per NUMA
 * socket when NUMA support is enabled, a single pool otherwise), the
 * ports[] array and the forwarding streams. Exits the application on
 * any allocation failure.
 */
static void
init_config(void)
{
        portid_t pid;
        struct rte_port *port;
        struct rte_mempool *mbp;
        unsigned int nb_mbuf_per_pool;
        lcoreid_t  lc_id;
        uint8_t port_per_socket[MAX_SOCKET];

        /* MAX_SOCKET bytes: port_per_socket elements are uint8_t. */
        memset(port_per_socket,0,MAX_SOCKET);
        /* Configuration of logical cores. */
        fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
                                sizeof(struct fwd_lcore *) * nb_lcores,
                                RTE_CACHE_LINE_SIZE);
        if (fwd_lcores == NULL) {
                rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
                                                        "failed\n", nb_lcores);
        }
        for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
                fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
                                               sizeof(struct fwd_lcore),
                                               RTE_CACHE_LINE_SIZE);
                if (fwd_lcores[lc_id] == NULL) {
                        rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
                                                                "failed\n");
                }
                fwd_lcores[lc_id]->cpuid_idx = lc_id;
        }

        /*
         * Create pools of mbuf.
         * If NUMA support is disabled, create a single pool of mbuf in
         * socket 0 memory by default.
         * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
         *
         * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
         * nb_txd can be configured at run time.
         */
        if (param_total_num_mbufs)
                nb_mbuf_per_pool = param_total_num_mbufs;
        else {
                /* Worst case per port: full RX + TX rings, one burst in
                 * flight, plus one mempool cache per lcore. */
                nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
                                + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

                if (!numa_support)
                        nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
        }

        if (!numa_support) {
                if (socket_num == UMA_NO_CONFIG)
                        mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
                else
                        mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
                                                 socket_num);
        }

        /* Configuration of Ethernet ports. */
        ports = rte_zmalloc("testpmd: ports",
                            sizeof(struct rte_port) * nb_ports,
                            RTE_CACHE_LINE_SIZE);
        if (ports == NULL) {
                rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
                                                        "failed\n", nb_ports);
        }

        for (pid = 0; pid < nb_ports; pid++) {
                port = &ports[pid];
                rte_eth_dev_info_get(pid, &port->dev_info);

                /* Count the ports attached to each socket so the NUMA
                 * pools below can be sized accordingly. */
                if (numa_support) {
                        if (port_numa[pid] != NUMA_NO_CONFIG)
                                port_per_socket[port_numa[pid]]++;
                        else {
                                uint32_t socket_id = rte_eth_dev_socket_id(pid);

                                /* if socket_id is invalid, set to 0 */
                                if (check_socket_id(socket_id) < 0)
                                        socket_id = 0;
                                port_per_socket[socket_id]++;
                        }
                }

                /* set flag to initialize port/queue */
                port->need_reconfig = 1;
                port->need_reconfig_queues = 1;
        }

        if (numa_support) {
                uint8_t i;
                unsigned int nb_mbuf;

                /* Split the user-requested total across the per-socket
                 * pools created below. */
                if (param_total_num_mbufs)
                        nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;

                for (i = 0; i < MAX_SOCKET; i++) {
                        nb_mbuf = (nb_mbuf_per_pool *
                                                port_per_socket[i]);
                        if (nb_mbuf)
                                mbuf_pool_create(mbuf_data_size,
                                                nb_mbuf,i);
                }
        }
        init_port_config();

        /*
         * Records which Mbuf pool to use by each logical core, if needed.
         */
        for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
                mbp = mbuf_pool_find(
                        rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

                /* Fall back to the socket-0 pool when the lcore's own
                 * socket has no pool. */
                if (mbp == NULL)
                        mbp = mbuf_pool_find(0);
                fwd_lcores[lc_id]->mbp = mbp;
        }

        /* Configuration of packet forwarding streams. */
        if (init_fwd_streams() < 0)
                rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
618
619
/*
 * Grow/refresh the ports[] array after a port has been attached at run
 * time and mark the new port for (re)configuration.
 *
 * NOTE(review): overwriting `ports` with rte_realloc()'s result would
 * normally leak on failure, but rte_exit() aborts the application in
 * that case, so the pattern is harmless here.
 * NOTE(review): assumes new_port_id < nb_ports — confirm at callers.
 */
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
        struct rte_port *port;

        /* Reconfiguration of Ethernet ports. */
        ports = rte_realloc(ports,
                            sizeof(struct rte_port) * nb_ports,
                            RTE_CACHE_LINE_SIZE);
        if (ports == NULL) {
                rte_exit(EXIT_FAILURE, "rte_realloc(%d struct rte_port) failed\n",
                                nb_ports);
        }

        port = &ports[new_port_id];
        rte_eth_dev_info_get(new_port_id, &port->dev_info);

        /* set flag to initialize port/queue */
        port->need_reconfig = 1;
        port->need_reconfig_queues = 1;
        port->socket_id = socket_id;

        init_port_config();
}
644
645
/*
 * (Re)build the forwarding stream array (one stream per RX queue of
 * each port) and assign every port its NUMA socket id.
 *
 * Returns 0 on success, -1 when the configured number of RX or TX
 * queues exceeds what a device supports. Exits the application on
 * allocation failure.
 */
int
init_fwd_streams(void)
{
        portid_t pid;
        struct rte_port *port;
        streamid_t sm_id, nb_fwd_streams_new;

        /* set socket id according to numa or not */
        for (pid = 0; pid < nb_ports; pid++) {
                port = &ports[pid];
                if (nb_rxq > port->dev_info.max_rx_queues) {
                        printf("Fail: nb_rxq(%d) is greater than "
                                "max_rx_queues(%d)\n", nb_rxq,
                                port->dev_info.max_rx_queues);
                        return -1;
                }
                if (nb_txq > port->dev_info.max_tx_queues) {
                        printf("Fail: nb_txq(%d) is greater than "
                                "max_tx_queues(%d)\n", nb_txq,
                                port->dev_info.max_tx_queues);
                        return -1;
                }
                if (numa_support) {
                        if (port_numa[pid] != NUMA_NO_CONFIG)
                                port->socket_id = port_numa[pid];
                        else {
                                port->socket_id = rte_eth_dev_socket_id(pid);

                                /* if socket_id is invalid, set to 0 */
                                if (check_socket_id(port->socket_id) < 0)
                                        port->socket_id = 0;
                        }
                }
                else {
                        /* UMA: all ports use socket 0 unless --socket-num
                         * was given on the command line. */
                        if (socket_num == UMA_NO_CONFIG)
                                port->socket_id = 0;
                        else
                                port->socket_id = socket_num;
                }
        }

        nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
        /* Nothing to rebuild when the stream count did not change. */
        if (nb_fwd_streams_new == nb_fwd_streams)
                return 0;
        /* clear the old */
        if (fwd_streams != NULL) {
                for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
                        if (fwd_streams[sm_id] == NULL)
                                continue;
                        rte_free(fwd_streams[sm_id]);
                        fwd_streams[sm_id] = NULL;
                }
                rte_free(fwd_streams);
                fwd_streams = NULL;
        }

        /* init new */
        nb_fwd_streams = nb_fwd_streams_new;
        fwd_streams = rte_zmalloc("testpmd: fwd_streams",
                sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
        if (fwd_streams == NULL)
                rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
                                                "failed\n", nb_fwd_streams);

        for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
                fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
                                sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
                if (fwd_streams[sm_id] == NULL)
                        rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
                                                                " failed\n");
        }

        return 0;
}
720
721 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
722 static void
723 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
724 {
725         unsigned int total_burst;
726         unsigned int nb_burst;
727         unsigned int burst_stats[3];
728         uint16_t pktnb_stats[3];
729         uint16_t nb_pkt;
730         int burst_percent[3];
731
732         /*
733          * First compute the total number of packet bursts and the
734          * two highest numbers of bursts of the same number of packets.
735          */
736         total_burst = 0;
737         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
738         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
739         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
740                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
741                 if (nb_burst == 0)
742                         continue;
743                 total_burst += nb_burst;
744                 if (nb_burst > burst_stats[0]) {
745                         burst_stats[1] = burst_stats[0];
746                         pktnb_stats[1] = pktnb_stats[0];
747                         burst_stats[0] = nb_burst;
748                         pktnb_stats[0] = nb_pkt;
749                 }
750         }
751         if (total_burst == 0)
752                 return;
753         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
754         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
755                burst_percent[0], (int) pktnb_stats[0]);
756         if (burst_stats[0] == total_burst) {
757                 printf("]\n");
758                 return;
759         }
760         if (burst_stats[0] + burst_stats[1] == total_burst) {
761                 printf(" + %d%% of %d pkts]\n",
762                        100 - burst_percent[0], pktnb_stats[1]);
763                 return;
764         }
765         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
766         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
767         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
768                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
769                 return;
770         }
771         printf(" + %d%% of %d pkts + %d%% of others]\n",
772                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
773 }
774 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
775
776 static void
777 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
778 {
779         struct rte_port *port;
780         uint8_t i;
781
782         static const char *fwd_stats_border = "----------------------";
783
784         port = &ports[port_id];
785         printf("\n  %s Forward statistics for port %-2d %s\n",
786                fwd_stats_border, port_id, fwd_stats_border);
787
788         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
789                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
790                        "%-"PRIu64"\n",
791                        stats->ipackets, stats->imissed,
792                        (uint64_t) (stats->ipackets + stats->imissed));
793
794                 if (cur_fwd_eng == &csum_fwd_engine)
795                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
796                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
797                 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
798                         printf("  RX-badcrc:  %-14"PRIu64" RX-badlen:  %-14"PRIu64
799                                "RX-error: %-"PRIu64"\n",
800                                stats->ibadcrc, stats->ibadlen, stats->ierrors);
801                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
802                 }
803
804                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
805                        "%-"PRIu64"\n",
806                        stats->opackets, port->tx_dropped,
807                        (uint64_t) (stats->opackets + port->tx_dropped));
808         }
809         else {
810                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
811                        "%14"PRIu64"\n",
812                        stats->ipackets, stats->imissed,
813                        (uint64_t) (stats->ipackets + stats->imissed));
814
815                 if (cur_fwd_eng == &csum_fwd_engine)
816                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
817                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
818                 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
819                         printf("  RX-badcrc:              %14"PRIu64"    RX-badlen: %14"PRIu64
820                                "    RX-error:%"PRIu64"\n",
821                                stats->ibadcrc, stats->ibadlen, stats->ierrors);
822                         printf("  RX-nombufs:             %14"PRIu64"\n",
823                                stats->rx_nombuf);
824                 }
825
826                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
827                        "%14"PRIu64"\n",
828                        stats->opackets, port->tx_dropped,
829                        (uint64_t) (stats->opackets + port->tx_dropped));
830         }
831
832         /* Display statistics of XON/XOFF pause frames, if any. */
833         if ((stats->tx_pause_xon  | stats->rx_pause_xon |
834              stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
835                 printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
836                        stats->rx_pause_xoff, stats->rx_pause_xon);
837                 printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
838                        stats->tx_pause_xoff, stats->tx_pause_xon);
839         }
840
841 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
842         if (port->rx_stream)
843                 pkt_burst_stats_display("RX",
844                         &port->rx_stream->rx_burst_stats);
845         if (port->tx_stream)
846                 pkt_burst_stats_display("TX",
847                         &port->tx_stream->tx_burst_stats);
848 #endif
849         /* stats fdir */
850         if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
851                 printf("  Fdirmiss:%14"PRIu64"    Fdirmatch:%14"PRIu64"\n",
852                        stats->fdirmiss,
853                        stats->fdirmatch);
854
855         if (port->rx_queue_stats_mapping_enabled) {
856                 printf("\n");
857                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
858                         printf("  Stats reg %2d RX-packets:%14"PRIu64
859                                "     RX-errors:%14"PRIu64
860                                "    RX-bytes:%14"PRIu64"\n",
861                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
862                 }
863                 printf("\n");
864         }
865         if (port->tx_queue_stats_mapping_enabled) {
866                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
867                         printf("  Stats reg %2d TX-packets:%14"PRIu64
868                                "                                 TX-bytes:%14"PRIu64"\n",
869                                i, stats->q_opackets[i], stats->q_obytes[i]);
870                 }
871         }
872
873         printf("  %s--------------------------------%s\n",
874                fwd_stats_border, fwd_stats_border);
875 }
876
877 static void
878 fwd_stream_stats_display(streamid_t stream_id)
879 {
880         struct fwd_stream *fs;
881         static const char *fwd_top_stats_border = "-------";
882
883         fs = fwd_streams[stream_id];
884         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
885             (fs->fwd_dropped == 0))
886                 return;
887         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
888                "TX Port=%2d/Queue=%2d %s\n",
889                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
890                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
891         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
892                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
893
894         /* if checksum mode */
895         if (cur_fwd_eng == &csum_fwd_engine) {
896                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
897                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
898         }
899
900 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
901         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
902         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
903 #endif
904 }
905
906 static void
907 flush_fwd_rx_queues(void)
908 {
909         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
910         portid_t  rxp;
911         portid_t port_id;
912         queueid_t rxq;
913         uint16_t  nb_rx;
914         uint16_t  i;
915         uint8_t   j;
916
917         for (j = 0; j < 2; j++) {
918                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
919                         for (rxq = 0; rxq < nb_rxq; rxq++) {
920                                 port_id = fwd_ports_ids[rxp];
921                                 do {
922                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
923                                                 pkts_burst, MAX_PKT_BURST);
924                                         for (i = 0; i < nb_rx; i++)
925                                                 rte_pktmbuf_free(pkts_burst[i]);
926                                 } while (nb_rx > 0);
927                         }
928                 }
929                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
930         }
931 }
932
933 static void
934 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
935 {
936         struct fwd_stream **fsm;
937         streamid_t nb_fs;
938         streamid_t sm_id;
939
940         fsm = &fwd_streams[fc->stream_idx];
941         nb_fs = fc->stream_nb;
942         do {
943                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
944                         (*pkt_fwd)(fsm[sm_id]);
945         } while (! fc->stopped);
946 }
947
948 static int
949 start_pkt_forward_on_core(void *fwd_arg)
950 {
951         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
952                              cur_fwd_config.fwd_eng->packet_fwd);
953         return 0;
954 }
955
956 /*
957  * Run the TXONLY packet forwarding engine to send a single burst of packets.
958  * Used to start communication flows in network loopback test configurations.
959  */
960 static int
961 run_one_txonly_burst_on_core(void *fwd_arg)
962 {
963         struct fwd_lcore *fwd_lc;
964         struct fwd_lcore tmp_lcore;
965
966         fwd_lc = (struct fwd_lcore *) fwd_arg;
967         tmp_lcore = *fwd_lc;
968         tmp_lcore.stopped = 1;
969         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
970         return 0;
971 }
972
973 /*
974  * Launch packet forwarding:
975  *     - Setup per-port forwarding context.
976  *     - launch logical cores with their forwarding configuration.
977  */
978 static void
979 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
980 {
981         port_fwd_begin_t port_fwd_begin;
982         unsigned int i;
983         unsigned int lc_id;
984         int diag;
985
986         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
987         if (port_fwd_begin != NULL) {
988                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
989                         (*port_fwd_begin)(fwd_ports_ids[i]);
990         }
991         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
992                 lc_id = fwd_lcores_cpuids[i];
993                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
994                         fwd_lcores[i]->stopped = 0;
995                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
996                                                      fwd_lcores[i], lc_id);
997                         if (diag != 0)
998                                 printf("launch lcore %u failed - diag=%d\n",
999                                        lc_id, diag);
1000                 }
1001         }
1002 }
1003
1004 /*
1005  * Launch packet forwarding configuration.
1006  */
1007 void
1008 start_packet_forwarding(int with_tx_first)
1009 {
1010         port_fwd_begin_t port_fwd_begin;
1011         port_fwd_end_t  port_fwd_end;
1012         struct rte_port *port;
1013         unsigned int i;
1014         portid_t   pt_id;
1015         streamid_t sm_id;
1016
1017         if (all_ports_started() == 0) {
1018                 printf("Not all ports were started\n");
1019                 return;
1020         }
1021         if (test_done == 0) {
1022                 printf("Packet forwarding already started\n");
1023                 return;
1024         }
1025         if(dcb_test) {
1026                 for (i = 0; i < nb_fwd_ports; i++) {
1027                         pt_id = fwd_ports_ids[i];
1028                         port = &ports[pt_id];
1029                         if (!port->dcb_flag) {
1030                                 printf("In DCB mode, all forwarding ports must "
1031                                        "be configured in this mode.\n");
1032                                 return;
1033                         }
1034                 }
1035                 if (nb_fwd_lcores == 1) {
1036                         printf("In DCB mode,the nb forwarding cores "
1037                                "should be larger than 1.\n");
1038                         return;
1039                 }
1040         }
1041         test_done = 0;
1042
1043         if(!no_flush_rx)
1044                 flush_fwd_rx_queues();
1045
1046         fwd_config_setup();
1047         rxtx_config_display();
1048
1049         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1050                 pt_id = fwd_ports_ids[i];
1051                 port = &ports[pt_id];
1052                 rte_eth_stats_get(pt_id, &port->stats);
1053                 port->tx_dropped = 0;
1054
1055                 map_port_queue_stats_mapping_registers(pt_id, port);
1056         }
1057         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1058                 fwd_streams[sm_id]->rx_packets = 0;
1059                 fwd_streams[sm_id]->tx_packets = 0;
1060                 fwd_streams[sm_id]->fwd_dropped = 0;
1061                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1062                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1063
1064 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1065                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1066                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1067                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1068                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1069 #endif
1070 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1071                 fwd_streams[sm_id]->core_cycles = 0;
1072 #endif
1073         }
1074         if (with_tx_first) {
1075                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1076                 if (port_fwd_begin != NULL) {
1077                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1078                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1079                 }
1080                 launch_packet_forwarding(run_one_txonly_burst_on_core);
1081                 rte_eal_mp_wait_lcore();
1082                 port_fwd_end = tx_only_engine.port_fwd_end;
1083                 if (port_fwd_end != NULL) {
1084                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1085                                 (*port_fwd_end)(fwd_ports_ids[i]);
1086                 }
1087         }
1088         launch_packet_forwarding(start_pkt_forward_on_core);
1089 }
1090
1091 void
1092 stop_packet_forwarding(void)
1093 {
1094         struct rte_eth_stats stats;
1095         struct rte_port *port;
1096         port_fwd_end_t  port_fwd_end;
1097         int i;
1098         portid_t   pt_id;
1099         streamid_t sm_id;
1100         lcoreid_t  lc_id;
1101         uint64_t total_recv;
1102         uint64_t total_xmit;
1103         uint64_t total_rx_dropped;
1104         uint64_t total_tx_dropped;
1105         uint64_t total_rx_nombuf;
1106         uint64_t tx_dropped;
1107         uint64_t rx_bad_ip_csum;
1108         uint64_t rx_bad_l4_csum;
1109 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1110         uint64_t fwd_cycles;
1111 #endif
1112         static const char *acc_stats_border = "+++++++++++++++";
1113
1114         if (all_ports_started() == 0) {
1115                 printf("Not all ports were started\n");
1116                 return;
1117         }
1118         if (test_done) {
1119                 printf("Packet forwarding not started\n");
1120                 return;
1121         }
1122         printf("Telling cores to stop...");
1123         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1124                 fwd_lcores[lc_id]->stopped = 1;
1125         printf("\nWaiting for lcores to finish...\n");
1126         rte_eal_mp_wait_lcore();
1127         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1128         if (port_fwd_end != NULL) {
1129                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1130                         pt_id = fwd_ports_ids[i];
1131                         (*port_fwd_end)(pt_id);
1132                 }
1133         }
1134 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1135         fwd_cycles = 0;
1136 #endif
1137         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1138                 if (cur_fwd_config.nb_fwd_streams >
1139                     cur_fwd_config.nb_fwd_ports) {
1140                         fwd_stream_stats_display(sm_id);
1141                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1142                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1143                 } else {
1144                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1145                                 fwd_streams[sm_id];
1146                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1147                                 fwd_streams[sm_id];
1148                 }
1149                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1150                 tx_dropped = (uint64_t) (tx_dropped +
1151                                          fwd_streams[sm_id]->fwd_dropped);
1152                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1153
1154                 rx_bad_ip_csum =
1155                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1156                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1157                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1158                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1159                                                         rx_bad_ip_csum;
1160
1161                 rx_bad_l4_csum =
1162                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1163                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1164                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1165                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1166                                                         rx_bad_l4_csum;
1167
1168 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1169                 fwd_cycles = (uint64_t) (fwd_cycles +
1170                                          fwd_streams[sm_id]->core_cycles);
1171 #endif
1172         }
1173         total_recv = 0;
1174         total_xmit = 0;
1175         total_rx_dropped = 0;
1176         total_tx_dropped = 0;
1177         total_rx_nombuf  = 0;
1178         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1179                 pt_id = fwd_ports_ids[i];
1180
1181                 port = &ports[pt_id];
1182                 rte_eth_stats_get(pt_id, &stats);
1183                 stats.ipackets -= port->stats.ipackets;
1184                 port->stats.ipackets = 0;
1185                 stats.opackets -= port->stats.opackets;
1186                 port->stats.opackets = 0;
1187                 stats.ibytes   -= port->stats.ibytes;
1188                 port->stats.ibytes = 0;
1189                 stats.obytes   -= port->stats.obytes;
1190                 port->stats.obytes = 0;
1191                 stats.imissed  -= port->stats.imissed;
1192                 port->stats.imissed = 0;
1193                 stats.oerrors  -= port->stats.oerrors;
1194                 port->stats.oerrors = 0;
1195                 stats.rx_nombuf -= port->stats.rx_nombuf;
1196                 port->stats.rx_nombuf = 0;
1197                 stats.fdirmatch -= port->stats.fdirmatch;
1198                 port->stats.rx_nombuf = 0;
1199                 stats.fdirmiss -= port->stats.fdirmiss;
1200                 port->stats.rx_nombuf = 0;
1201
1202                 total_recv += stats.ipackets;
1203                 total_xmit += stats.opackets;
1204                 total_rx_dropped += stats.imissed;
1205                 total_tx_dropped += port->tx_dropped;
1206                 total_rx_nombuf  += stats.rx_nombuf;
1207
1208                 fwd_port_stats_display(pt_id, &stats);
1209         }
1210         printf("\n  %s Accumulated forward statistics for all ports"
1211                "%s\n",
1212                acc_stats_border, acc_stats_border);
1213         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1214                "%-"PRIu64"\n"
1215                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1216                "%-"PRIu64"\n",
1217                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1218                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1219         if (total_rx_nombuf > 0)
1220                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1221         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1222                "%s\n",
1223                acc_stats_border, acc_stats_border);
1224 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1225         if (total_recv > 0)
1226                 printf("\n  CPU cycles/packet=%u (total cycles="
1227                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1228                        (unsigned int)(fwd_cycles / total_recv),
1229                        fwd_cycles, total_recv);
1230 #endif
1231         printf("\nDone.\n");
1232         test_done = 1;
1233 }
1234
1235 void
1236 dev_set_link_up(portid_t pid)
1237 {
1238         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1239                 printf("\nSet link up fail.\n");
1240 }
1241
1242 void
1243 dev_set_link_down(portid_t pid)
1244 {
1245         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1246                 printf("\nSet link down fail.\n");
1247 }
1248
1249 static int
1250 all_ports_started(void)
1251 {
1252         portid_t pi;
1253         struct rte_port *port;
1254
1255         for (pi = 0; pi < nb_ports; pi++) {
1256                 port = &ports[pi];
1257                 /* Check if there is a port which is not started */
1258                 if (port->port_status != RTE_PORT_STARTED)
1259                         return 0;
1260         }
1261
1262         /* No port is not started */
1263         return 1;
1264 }
1265
/*
 * Configure (if needed) and start one port, or all ports when "pid" is
 * not a valid port number. Returns 0 on success, -1 on a configuration
 * failure that should abort the whole operation.
 */
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = 0;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return -1;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return -1;
	}

	if(dcb_config)
		dcb_test = 1;
	for (pi = 0; pi < nb_ports; pi++) {
		/* pid >= nb_ports means "start all ports". */
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		/* Atomically claim the port; it must currently be stopped. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				/* Release the port back to "stopped". */
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/* Prefer the per-port NUMA ring configuration
				 * when one was given; fall back to the port's
				 * own socket otherwise. */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					/* RX queues also need a mempool on the
					 * configured socket. */
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							"on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,rxring_numa[pi],
					     &(port->rx_conf),mp);
				}
				else
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,port->socket_id,
					     &(port->rx_conf),
					     mbuf_pool_find(port->socket_id));

				if (diag == 0)
					continue;


				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		/* Report the MAC address of the freshly started port. */
		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	/* NOTE(review): when no port was started the message below is
	 * printed even if the cause was not a running port — confirm
	 * whether this message is accurate for all failure paths. */
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
	else
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
1419
1420 void
1421 stop_port(portid_t pid)
1422 {
1423         portid_t pi;
1424         struct rte_port *port;
1425         int need_check_link_status = 0;
1426
1427         if (test_done == 0) {
1428                 printf("Please stop forwarding first\n");
1429                 return;
1430         }
1431         if (dcb_test) {
1432                 dcb_test = 0;
1433                 dcb_config = 0;
1434         }
1435         printf("Stopping ports...\n");
1436
1437         for (pi = 0; pi < nb_ports; pi++) {
1438                 if (pid < nb_ports && pid != pi)
1439                         continue;
1440
1441                 port = &ports[pi];
1442                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1443                                                 RTE_PORT_HANDLING) == 0)
1444                         continue;
1445
1446                 rte_eth_dev_stop(pi);
1447
1448                 if (rte_atomic16_cmpset(&(port->port_status),
1449                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1450                         printf("Port %d can not be set into stopped\n", pi);
1451                 need_check_link_status = 1;
1452         }
1453         if (need_check_link_status && !no_link_check)
1454                 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1455
1456         printf("Done\n");
1457 }
1458
1459 void
1460 close_port(portid_t pid)
1461 {
1462         portid_t pi;
1463         struct rte_port *port;
1464
1465         if (test_done == 0) {
1466                 printf("Please stop forwarding first\n");
1467                 return;
1468         }
1469
1470         printf("Closing ports...\n");
1471
1472         for (pi = 0; pi < nb_ports; pi++) {
1473                 if (pid < nb_ports && pid != pi)
1474                         continue;
1475
1476                 port = &ports[pi];
1477                 if (rte_atomic16_cmpset(&(port->port_status),
1478                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1479                         printf("Port %d is now not stopped\n", pi);
1480                         continue;
1481                 }
1482
1483                 rte_eth_dev_close(pi);
1484
1485                 if (rte_atomic16_cmpset(&(port->port_status),
1486                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1487                         printf("Port %d can not be set into stopped\n", pi);
1488         }
1489
1490         printf("Done\n");
1491 }
1492
1493 int
1494 all_ports_stopped(void)
1495 {
1496         portid_t pi;
1497         struct rte_port *port;
1498
1499         for (pi = 0; pi < nb_ports; pi++) {
1500                 port = &ports[pi];
1501                 if (port->port_status != RTE_PORT_STOPPED)
1502                         return 0;
1503         }
1504
1505         return 1;
1506 }
1507
1508 int
1509 port_is_started(portid_t port_id)
1510 {
1511         if (port_id_is_invalid(port_id))
1512                 return -1;
1513
1514         if (ports[port_id].port_status != RTE_PORT_STARTED)
1515                 return 0;
1516
1517         return 1;
1518 }
1519
1520 void
1521 pmd_test_exit(void)
1522 {
1523         portid_t pt_id;
1524
1525         for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1526                 printf("Stopping port %d...", pt_id);
1527                 fflush(stdout);
1528                 rte_eth_dev_close(pt_id);
1529                 printf("done\n");
1530         }
1531         printf("bye...\n");
1532 }
1533
/* Handler type for a parameter-less test command. */
typedef void (*cmd_func_t)(void);
/* Binds a command name to its handler function. */
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

/* Number of entries in the pmd_test_menu table (defined elsewhere). */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1541
1542 /* Check the link status of all ports in up to 9s, and print them finally */
1543 static void
1544 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1545 {
1546 #define CHECK_INTERVAL 100 /* 100ms */
1547 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1548         uint8_t portid, count, all_ports_up, print_flag = 0;
1549         struct rte_eth_link link;
1550
1551         printf("Checking link statuses...\n");
1552         fflush(stdout);
1553         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1554                 all_ports_up = 1;
1555                 for (portid = 0; portid < port_num; portid++) {
1556                         if ((port_mask & (1 << portid)) == 0)
1557                                 continue;
1558                         memset(&link, 0, sizeof(link));
1559                         rte_eth_link_get_nowait(portid, &link);
1560                         /* print link status if flag set */
1561                         if (print_flag == 1) {
1562                                 if (link.link_status)
1563                                         printf("Port %d Link Up - speed %u "
1564                                                 "Mbps - %s\n", (uint8_t)portid,
1565                                                 (unsigned)link.link_speed,
1566                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1567                                         ("full-duplex") : ("half-duplex\n"));
1568                                 else
1569                                         printf("Port %d Link Down\n",
1570                                                 (uint8_t)portid);
1571                                 continue;
1572                         }
1573                         /* clear all_ports_up flag if any link down */
1574                         if (link.link_status == 0) {
1575                                 all_ports_up = 0;
1576                                 break;
1577                         }
1578                 }
1579                 /* after finally printing all link status, get out */
1580                 if (print_flag == 1)
1581                         break;
1582
1583                 if (all_ports_up == 0) {
1584                         fflush(stdout);
1585                         rte_delay_ms(CHECK_INTERVAL);
1586                 }
1587
1588                 /* set the print_flag if all ports up or timeout */
1589                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1590                         print_flag = 1;
1591                 }
1592         }
1593 }
1594
1595 static int
1596 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1597 {
1598         uint16_t i;
1599         int diag;
1600         uint8_t mapping_found = 0;
1601
1602         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1603                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1604                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1605                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1606                                         tx_queue_stats_mappings[i].queue_id,
1607                                         tx_queue_stats_mappings[i].stats_counter_id);
1608                         if (diag != 0)
1609                                 return diag;
1610                         mapping_found = 1;
1611                 }
1612         }
1613         if (mapping_found)
1614                 port->tx_queue_stats_mapping_enabled = 1;
1615         return 0;
1616 }
1617
1618 static int
1619 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1620 {
1621         uint16_t i;
1622         int diag;
1623         uint8_t mapping_found = 0;
1624
1625         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1626                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1627                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1628                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1629                                         rx_queue_stats_mappings[i].queue_id,
1630                                         rx_queue_stats_mappings[i].stats_counter_id);
1631                         if (diag != 0)
1632                                 return diag;
1633                         mapping_found = 1;
1634                 }
1635         }
1636         if (mapping_found)
1637                 port->rx_queue_stats_mapping_enabled = 1;
1638         return 0;
1639 }
1640
1641 static void
1642 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1643 {
1644         int diag = 0;
1645
1646         diag = set_tx_queue_stats_mapping_registers(pi, port);
1647         if (diag != 0) {
1648                 if (diag == -ENOTSUP) {
1649                         port->tx_queue_stats_mapping_enabled = 0;
1650                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1651                 }
1652                 else
1653                         rte_exit(EXIT_FAILURE,
1654                                         "set_tx_queue_stats_mapping_registers "
1655                                         "failed for port id=%d diag=%d\n",
1656                                         pi, diag);
1657         }
1658
1659         diag = set_rx_queue_stats_mapping_registers(pi, port);
1660         if (diag != 0) {
1661                 if (diag == -ENOTSUP) {
1662                         port->rx_queue_stats_mapping_enabled = 0;
1663                         printf("RX queue stats mapping not supported port id=%d\n", pi);
1664                 }
1665                 else
1666                         rte_exit(EXIT_FAILURE,
1667                                         "set_rx_queue_stats_mapping_registers "
1668                                         "failed for port id=%d diag=%d\n",
1669                                         pi, diag);
1670         }
1671 }
1672
1673 static void
1674 rxtx_port_config(struct rte_port *port)
1675 {
1676         port->rx_conf = port->dev_info.default_rxconf;
1677         port->tx_conf = port->dev_info.default_txconf;
1678
1679         /* Check if any RX/TX parameters have been passed */
1680         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1681                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1682
1683         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1684                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1685
1686         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1687                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1688
1689         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1690                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1691
1692         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1693                 port->rx_conf.rx_drop_en = rx_drop_en;
1694
1695         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1696                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1697
1698         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1699                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1700
1701         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1702                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1703
1704         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1705                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1706
1707         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1708                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1709
1710         if (txq_flags != RTE_PMD_PARAM_UNSET)
1711                 port->tx_conf.txq_flags = txq_flags;
1712 }
1713
1714 void
1715 init_port_config(void)
1716 {
1717         portid_t pid;
1718         struct rte_port *port;
1719
1720         for (pid = 0; pid < nb_ports; pid++) {
1721                 port = &ports[pid];
1722                 port->dev_conf.rxmode = rx_mode;
1723                 port->dev_conf.fdir_conf = fdir_conf;
1724                 if (nb_rxq > 1) {
1725                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1726                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1727                 } else {
1728                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1729                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1730                 }
1731
1732                 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1733                         if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1734                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1735                         else
1736                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1737                 }
1738
1739                 if (port->dev_info.max_vfs != 0) {
1740                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1741                                 port->dev_conf.rxmode.mq_mode =
1742                                         ETH_MQ_RX_VMDQ_RSS;
1743                         else
1744                                 port->dev_conf.rxmode.mq_mode =
1745                                         ETH_MQ_RX_NONE;
1746
1747                         port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1748                 }
1749
1750                 rxtx_port_config(port);
1751
1752                 rte_eth_macaddr_get(pid, &port->eth_addr);
1753
1754                 map_port_queue_stats_mapping_registers(pid, port);
1755 #ifdef RTE_NIC_BYPASS
1756                 rte_eth_dev_bypass_init(pid);
1757 #endif
1758         }
1759 }
1760
/* VLAN IDs 0..31 used to populate the VMDQ+DCB pool map and the
 * per-port VLAN filter table in the DCB configuration paths below. */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
1767
1768 static  int
1769 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1770 {
1771         uint8_t i;
1772
1773         /*
1774          * Builds up the correct configuration for dcb+vt based on the vlan tags array
1775          * given above, and the number of traffic classes available for use.
1776          */
1777         if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1778                 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1779                 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1780
1781                 /* VMDQ+DCB RX and TX configrations */
1782                 vmdq_rx_conf.enable_default_pool = 0;
1783                 vmdq_rx_conf.default_pool = 0;
1784                 vmdq_rx_conf.nb_queue_pools =
1785                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1786                 vmdq_tx_conf.nb_queue_pools =
1787                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1788
1789                 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1790                 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1791                         vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1792                         vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1793                 }
1794                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1795                         vmdq_rx_conf.dcb_queue[i] = i;
1796                         vmdq_tx_conf.dcb_queue[i] = i;
1797                 }
1798
1799                 /*set DCB mode of RX and TX of multiple queues*/
1800                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1801                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1802                 if (dcb_conf->pfc_en)
1803                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1804                 else
1805                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1806
1807                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1808                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1809                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1810                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1811         }
1812         else {
1813                 struct rte_eth_dcb_rx_conf rx_conf;
1814                 struct rte_eth_dcb_tx_conf tx_conf;
1815
1816                 /* queue mapping configuration of DCB RX and TX */
1817                 if (dcb_conf->num_tcs == ETH_4_TCS)
1818                         dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1819                 else
1820                         dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1821
1822                 rx_conf.nb_tcs = dcb_conf->num_tcs;
1823                 tx_conf.nb_tcs = dcb_conf->num_tcs;
1824
1825                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1826                         rx_conf.dcb_queue[i] = i;
1827                         tx_conf.dcb_queue[i] = i;
1828                 }
1829                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1830                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1831                 if (dcb_conf->pfc_en)
1832                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1833                 else
1834                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1835
1836                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1837                                 sizeof(struct rte_eth_dcb_rx_conf)));
1838                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1839                                 sizeof(struct rte_eth_dcb_tx_conf)));
1840         }
1841
1842         return 0;
1843 }
1844
/*
 * Reconfigure one port for DCB operation.
 *
 * Side effects on globals: forces nb_rxq/nb_txq to 128 and
 * rx_free_thresh to 64, and sets the file-scope dcb_config flag.
 * The port's dev_conf is rebuilt from get_eth_dcb_conf(), HW VLAN
 * filtering is enabled, and every VLAN in vlan_tags[] is added to the
 * port's filter table.
 *
 * Returns 0 on success, or the negative value from get_eth_dcb_conf().
 */
int
init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t nb_vlan;
	uint16_t i;

	/* rxq and txq configuration in dcb mode */
	nb_rxq = 128;
	nb_txq = 128;
	rx_free_thresh = 64;

	memset(&port_conf,0,sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter: accept every tag listed in vlan_tags[] */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < nb_vlan; i++){
		rx_vft_set(pid, vlan_tags[i], 1);
	}

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	/* Remember DCB is active so init_port_config() leaves mq_mode alone. */
	rte_port->dcb_flag = 1;

	return 0;
}
1886
/*
 * testpmd entry point: initialize the EAL, parse application arguments,
 * configure and start all ports, then either drop into the interactive
 * command line or start packet forwarding immediately.
 */
int
main(int argc, char** argv)
{
	int  diag;
	uint8_t port_id;

	/* EAL consumes its own arguments first; diag is how many it ate. */
	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No probed ethernet device\n");

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Shift past the EAL arguments; the rest belong to testpmd. */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	for (port_id = 0; port_id < nb_ports; port_id++)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		/* Non-interactive: forward until the user presses enter. */
		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		if (rc < 0)
			return 1;
	}

	return 0;
}