app/test-pmd/testpmd.c
1 /*-
2  *   BSD LICENSE
3  * 
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  * 
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  * 
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  * 
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81
82 uint16_t verbose_level = 0; /**< Silent by default. */
83
84 /* use master core for command line ? */
85 uint8_t interactive = 0;
86
87 /*
88  * NUMA support configuration.
89  * When set, the NUMA support attempts to dispatch the allocation of the
90  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
91  * probed ports among the CPU sockets 0 and 1.
92  * Otherwise, all memory is allocated from CPU socket 0.
93  */
94 uint8_t numa_support = 0; /**< No numa support by default */
95
96 /*
97  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
98  * not configured.
99  */
100 uint8_t socket_num = UMA_NO_CONFIG;
101
102 /*
103  * Use anonymous mapped memory (which may not be physically contiguous) for mbufs.
104  */
105 uint8_t mp_anon = 0;
106
107 /*
108  * Record the Ethernet address of peer target ports to which packets are
109  * forwarded.
110  * Must be instantiated with the Ethernet addresses of peer traffic generator
111  * ports.
112  */
113 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
114 portid_t nb_peer_eth_addrs = 0;
115
116 /*
117  * Probed Target Environment.
118  */
119 struct rte_port *ports;        /**< For all probed ethernet ports. */
120 portid_t nb_ports;             /**< Number of probed ethernet ports. */
121 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
122 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
123
124 /*
125  * Test Forwarding Configuration.
126  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
127  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
128  */
129 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
130 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
131 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
132 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
133
134 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
135 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
136
137 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
138 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
139
140 /*
141  * Forwarding engines.
142  */
143 struct fwd_engine * fwd_engines[] = {
144         &io_fwd_engine,
145         &mac_fwd_engine,
146         &mac_retry_fwd_engine,
147         &rx_only_engine,
148         &tx_only_engine,
149         &csum_fwd_engine,
150 #ifdef RTE_LIBRTE_IEEE1588
151         &ieee1588_fwd_engine,
152 #endif
153         NULL,
154 };
155
156 struct fwd_config cur_fwd_config;
157 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
158
159 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
160 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
161                                       * specified on command-line. */
162
163 /*
164  * Configuration of packet segments used by the "txonly" processing engine.
165  */
166 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
167 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
168         TXONLY_DEF_PACKET_LEN,
169 };
170 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
171
172 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
173 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
174
175 /* Whether the current configuration is in DCB mode; 0 means it is not. */
176 uint8_t dcb_config = 0;
177
178 /* Whether DCB is in testing state. */
179 uint8_t dcb_test = 0;
180
181 /* DCB on and VT on mapping is default */
182 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
183
184 /*
185  * Configurable number of RX/TX queues.
186  */
187 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
188 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
189
190 /*
191  * Configurable number of RX/TX ring descriptors.
192  */
193 #define RTE_TEST_RX_DESC_DEFAULT 128
194 #define RTE_TEST_TX_DESC_DEFAULT 512
195 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
196 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
197
198 /*
199  * Configurable values of RX and TX ring threshold registers.
200  */
201 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
202 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
203 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
204
205 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
206 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
207 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
208
209 struct rte_eth_thresh rx_thresh = {
210         .pthresh = RX_PTHRESH,
211         .hthresh = RX_HTHRESH,
212         .wthresh = RX_WTHRESH,
213 };
214
215 struct rte_eth_thresh tx_thresh = {
216         .pthresh = TX_PTHRESH,
217         .hthresh = TX_HTHRESH,
218         .wthresh = TX_WTHRESH,
219 };
220
221 /*
222  * Configurable value of RX free threshold.
223  */
224 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
225
226 /*
227  * Configurable value of RX drop enable.
228  */
229 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
230
231 /*
232  * Configurable value of TX free threshold.
233  */
234 uint16_t tx_free_thresh = 0; /* Use default values. */
235
236 /*
237  * Configurable value of TX RS bit threshold.
238  */
239 uint16_t tx_rs_thresh = 0; /* Use default values. */
240
241 /*
242  * Configurable value of TX queue flags.
243  */
244 uint32_t txq_flags = 0; /* No flags set. */
245
246 /*
247  * Receive Side Scaling (RSS) configuration.
248  */
249 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
250
251 /*
252  * Port topology configuration
253  */
254 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
255
256 /*
257  * Avoid flushing all the RX queues before starting forwarding.
258  */
259 uint8_t no_flush_rx = 0; /* flush by default */
260
261 /*
262  * NIC bypass mode configuration options.
263  */
264 #ifdef RTE_NIC_BYPASS
265
266 /* The NIC bypass watchdog timeout. */
267 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF; 
268
269 #endif
270
271 /*
272  * Ethernet device configuration.
273  */
274 struct rte_eth_rxmode rx_mode = {
275         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
276         .split_hdr_size = 0,
277         .header_split   = 0, /**< Header Split disabled. */
278         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
279         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
280         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
281         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
282         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
283         .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
284 };
285
286 struct rte_fdir_conf fdir_conf = {
287         .mode = RTE_FDIR_MODE_NONE,
288         .pballoc = RTE_FDIR_PBALLOC_64K,
289         .status = RTE_FDIR_REPORT_STATUS,
290         .flexbytes_offset = 0x6,
291         .drop_queue = 127,
292 };
293
294 static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
295
296 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
297 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
298
299 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
300 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
301
302 uint16_t nb_tx_queue_stats_mappings = 0;
303 uint16_t nb_rx_queue_stats_mappings = 0;
304
305 /* Forward function declarations */
306 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
307 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
308
309 /*
310  * Check if all the ports are started.
311  * If yes, return positive value. If not, return zero.
312  */
313 static int all_ports_started(void);
314
315 /*
316  * Setup default configuration.
317  */
318 static void
319 set_default_fwd_lcores_config(void)
320 {
321         unsigned int i;
322         unsigned int nb_lc;
323
324         nb_lc = 0;
325         for (i = 0; i < RTE_MAX_LCORE; i++) {
326                 if (! rte_lcore_is_enabled(i))
327                         continue;
328                 if (i == rte_get_master_lcore())
329                         continue;
330                 fwd_lcores_cpuids[nb_lc++] = i;
331         }
332         nb_lcores = (lcoreid_t) nb_lc;
333         nb_cfg_lcores = nb_lcores;
334         nb_fwd_lcores = 1;
335 }
336
337 static void
338 set_def_peer_eth_addrs(void)
339 {
340         portid_t i;
341
342         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
343                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
344                 peer_eth_addrs[i].addr_bytes[5] = i;
345         }
346 }
347
348 static void
349 set_default_fwd_ports_config(void)
350 {
351         portid_t pt_id;
352
353         for (pt_id = 0; pt_id < nb_ports; pt_id++)
354                 fwd_ports_ids[pt_id] = pt_id;
355
356         nb_cfg_ports = nb_ports;
357         nb_fwd_ports = nb_ports;
358 }
359
360 void
361 set_def_fwd_config(void)
362 {
363         set_default_fwd_lcores_config();
364         set_def_peer_eth_addrs();
365         set_default_fwd_ports_config();
366 }
367
368 /*
369  * Configuration initialisation done once at init time.
370  */
371 struct mbuf_ctor_arg {
372         uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
373         uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
374 };
375
376 struct mbuf_pool_ctor_arg {
377         uint16_t seg_buf_size; /**< size of data segment in mbuf. */
378 };
379
380 static void
381 testpmd_mbuf_ctor(struct rte_mempool *mp,
382                   void *opaque_arg,
383                   void *raw_mbuf,
384                   __attribute__((unused)) unsigned i)
385 {
386         struct mbuf_ctor_arg *mb_ctor_arg;
387         struct rte_mbuf    *mb;
388
389         mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
390         mb = (struct rte_mbuf *) raw_mbuf;
391
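        /*
         * The data buffer lives in the same mempool element as the mbuf
         * header: seg_buf_offset is the cache-line-rounded size of
         * struct rte_mbuf, so buf_addr and buf_physaddr simply point
         * just past the header.
         */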
392         mb->type         = RTE_MBUF_PKT;
393         mb->pool         = mp;
394         mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
395         mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
396                         mb_ctor_arg->seg_buf_offset);
397         mb->buf_len      = mb_ctor_arg->seg_buf_size;
399         mb->ol_flags     = 0;
400         mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
401         mb->pkt.nb_segs  = 1;
402         mb->pkt.vlan_macip.data = 0;
403         mb->pkt.hash.rss = 0;
404 }
405
406 static void
407 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
408                        void *opaque_arg)
409 {
410         struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
411         struct rte_pktmbuf_pool_private *mbp_priv;
412
413         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
414                 printf("%s(%s) private_data_size %d < %d\n",
415                        __func__, mp->name, (int) mp->private_data_size,
416                        (int) sizeof(struct rte_pktmbuf_pool_private));
417                 return;
418         }
419         mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
420         mbp_priv = rte_mempool_get_priv(mp);
421         mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
422 }
423
424 static void
425 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
426                  unsigned int socket_id)
427 {
428         char pool_name[RTE_MEMPOOL_NAMESIZE];
429         struct rte_mempool *rte_mp;
430         struct mbuf_pool_ctor_arg mbp_ctor_arg;
431         struct mbuf_ctor_arg mb_ctor_arg;
432         uint32_t mb_size;
433
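        /*
         * Each mempool element holds one mbuf: the (cache-line rounded)
         * rte_mbuf header followed by its data segment, which itself is
         * RTE_PKTMBUF_HEADROOM plus mbuf_seg_size bytes of packet data.
         */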
434         mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
435                                                 mbuf_seg_size);
436         mb_ctor_arg.seg_buf_offset =
437                 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
438         mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
439         mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
440         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
441
442 #ifdef RTE_LIBRTE_PMD_XENVIRT
443         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
444                                    (unsigned) mb_mempool_cache,
445                                    sizeof(struct rte_pktmbuf_pool_private),
446                                    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
447                                    testpmd_mbuf_ctor, &mb_ctor_arg,
448                                    socket_id, 0);
449
450
451
452 #else
453         if (mp_anon != 0)
454                 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
455                                     (unsigned) mb_mempool_cache,
456                                     sizeof(struct rte_pktmbuf_pool_private),
457                                     testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
458                                     testpmd_mbuf_ctor, &mb_ctor_arg,
459                                     socket_id, 0);
460         else 
461                 rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
462                                     (unsigned) mb_mempool_cache,
463                                     sizeof(struct rte_pktmbuf_pool_private),
464                                     testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
465                                     testpmd_mbuf_ctor, &mb_ctor_arg,
466                                     socket_id, 0);
467
468 #endif
469
470         if (rte_mp == NULL) {
471                 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
472                                                 "failed\n", socket_id);
473         } else if (verbose_level > 0) {
474                 rte_mempool_dump(rte_mp);
475         }
476 }
477
478 /*
479  * Check whether a given socket id is valid in NUMA mode.
480  * Return 0 if valid, -1 otherwise.
481  */
482 static int
483 check_socket_id(const unsigned int socket_id)
484 {
485         static int warning_once = 0;
486
487         if (socket_id >= MAX_SOCKET) {
488                 if (!warning_once && numa_support)
489                         printf("Warning: NUMA should be configured manually by"
490                                " using --port-numa-config and"
491                                " --ring-numa-config parameters along with"
492                                " --numa.\n");
493                 warning_once = 1;
494                 return -1;
495         }
496         return 0;
497 }
498
499 static void
500 init_config(void)
501 {
502         portid_t pid;
503         struct rte_port *port;
504         struct rte_mempool *mbp;
505         unsigned int nb_mbuf_per_pool;
506         lcoreid_t  lc_id;
507         uint8_t port_per_socket[MAX_SOCKET];
508
509         memset(port_per_socket, 0, MAX_SOCKET);
510         /* Configuration of logical cores. */
511         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
512                                 sizeof(struct fwd_lcore *) * nb_lcores,
513                                 CACHE_LINE_SIZE);
514         if (fwd_lcores == NULL) {
515                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
516                                                         "failed\n", nb_lcores);
517         }
518         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
519                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
520                                                sizeof(struct fwd_lcore),
521                                                CACHE_LINE_SIZE);
522                 if (fwd_lcores[lc_id] == NULL) {
523                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
524                                                                 "failed\n");
525                 }
526                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
527         }
528
529         /*
530          * Create pools of mbuf.
531          * If NUMA support is disabled, create a single pool of mbuf in
532          * socket 0 memory by default.
533          * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
534          *
535          * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
536          * nb_txd can be configured at run time.
537          */
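        /*
         * Unless overridden on the command line, size each pool for the
         * worst case: maximum-size RX and TX descriptor rings, one mempool
         * cache per lcore, plus one extra packet burst in flight.
         */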
538         if (param_total_num_mbufs)
539                 nb_mbuf_per_pool = param_total_num_mbufs;
540         else {
541                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
542                                 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
543
544                 if (!numa_support)
545                         nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
546         }
547
548         if (!numa_support) {
549                 if (socket_num == UMA_NO_CONFIG)
550                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
551                 else
552                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
553                                                  socket_num);
554         }
555         /*
556          * Record which mbuf pool each logical core should use, if needed.
557          */
558         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
559                 mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
560                 if (mbp == NULL)
561                         mbp = mbuf_pool_find(0);
562                 fwd_lcores[lc_id]->mbp = mbp;
563         }
564
565         /* Configuration of Ethernet ports. */
566         ports = rte_zmalloc("testpmd: ports",
567                             sizeof(struct rte_port) * nb_ports,
568                             CACHE_LINE_SIZE);
569         if (ports == NULL) {
570                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
571                                                         "failed\n", nb_ports);
572         }
573         
574         for (pid = 0; pid < nb_ports; pid++) {
575                 port = &ports[pid];
576                 rte_eth_dev_info_get(pid, &port->dev_info);
577
578                 if (numa_support) {
579                         if (port_numa[pid] != NUMA_NO_CONFIG) 
580                                 port_per_socket[port_numa[pid]]++;
581                         else {
582                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
583
584                                 /* if socket_id is invalid, set to 0 */
585                                 if (check_socket_id(socket_id) < 0)
586                                         socket_id = 0;
587                                 port_per_socket[socket_id]++; 
588                         }
589                 }
590
591                 /* set flag to initialize port/queue */
592                 port->need_reconfig = 1;
593                 port->need_reconfig_queues = 1;
594         }
595
596         if (numa_support) {
597                 uint8_t i;
598                 unsigned int nb_mbuf;
599
600                 if (param_total_num_mbufs)
601                         nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
602
603                 for (i = 0; i < MAX_SOCKET; i++) {
604                         nb_mbuf = (nb_mbuf_per_pool * 
605                                                 port_per_socket[i]);
606                         if (nb_mbuf) 
607                                 mbuf_pool_create(mbuf_data_size,
608                                                 nb_mbuf,i);
609                 }
610         }
611         init_port_config();
612         /* Configuration of packet forwarding streams. */
613         if (init_fwd_streams() < 0)
614                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
615 }
616
617 int
618 init_fwd_streams(void)
619 {
620         portid_t pid;
621         struct rte_port *port;
622         streamid_t sm_id, nb_fwd_streams_new;
623
624         /* Set each port's socket id according to whether NUMA is enabled. */
625         for (pid = 0; pid < nb_ports; pid++) {
626                 port = &ports[pid];
627                 if (nb_rxq > port->dev_info.max_rx_queues) {
628                         printf("Fail: nb_rxq(%d) is greater than "
629                                 "max_rx_queues(%d)\n", nb_rxq,
630                                 port->dev_info.max_rx_queues);
631                         return -1;
632                 }
633                 if (nb_txq > port->dev_info.max_tx_queues) {
634                         printf("Fail: nb_txq(%d) is greater than "
635                                 "max_tx_queues(%d)\n", nb_txq,
636                                 port->dev_info.max_tx_queues);
637                         return -1;
638                 }
639                 if (numa_support) {
640                         if (port_numa[pid] != NUMA_NO_CONFIG)
641                                 port->socket_id = port_numa[pid];
642                         else {
643                                 port->socket_id = rte_eth_dev_socket_id(pid);
644
645                                 /* if socket_id is invalid, set to 0 */
646                                 if (check_socket_id(port->socket_id) < 0)
647                                         port->socket_id = 0;
648                         }
649                 }
650                 else {
651                         if (socket_num == UMA_NO_CONFIG)
652                                 port->socket_id = 0;
653                         else
654                                 port->socket_id = socket_num;
655                 }
656         }
657
658         nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
659         if (nb_fwd_streams_new == nb_fwd_streams)
660                 return 0;
661         /* clear the old */
662         if (fwd_streams != NULL) {
663                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
664                         if (fwd_streams[sm_id] == NULL)
665                                 continue;
666                         rte_free(fwd_streams[sm_id]);
667                         fwd_streams[sm_id] = NULL;
668                 }
669                 rte_free(fwd_streams);
670                 fwd_streams = NULL;
671         }
672
673         /* init new */
674         nb_fwd_streams = nb_fwd_streams_new;
675         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
676                 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
677         if (fwd_streams == NULL)
678                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
679                                                 "failed\n", nb_fwd_streams);
680
681         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
682                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
683                                 sizeof(struct fwd_stream), CACHE_LINE_SIZE);
684                 if (fwd_streams[sm_id] == NULL)
685                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
686                                                                 " failed\n");
687         }
688
689         return 0;
690 }
691
692 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
693 static void
694 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
695 {
696         unsigned int total_burst;
697         unsigned int nb_burst;
698         unsigned int burst_stats[3];
699         uint16_t pktnb_stats[3];
700         uint16_t nb_pkt;
701         int burst_percent[3];
702
703         /*
704          * First compute the total number of packet bursts and the
705          * two highest numbers of bursts of the same number of packets.
706          */
707         total_burst = 0;
708         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
709         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
710         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
711                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
712                 if (nb_burst == 0)
713                         continue;
714                 total_burst += nb_burst;
715                 if (nb_burst > burst_stats[0]) {
716                         burst_stats[1] = burst_stats[0];
717                         pktnb_stats[1] = pktnb_stats[0];
718                         burst_stats[0] = nb_burst;
719                         pktnb_stats[0] = nb_pkt;
720                 }
721         }
722         if (total_burst == 0)
723                 return;
724         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
725         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
726                burst_percent[0], (int) pktnb_stats[0]);
727         if (burst_stats[0] == total_burst) {
728                 printf("]\n");
729                 return;
730         }
731         if (burst_stats[0] + burst_stats[1] == total_burst) {
732                 printf(" + %d%% of %d pkts]\n",
733                        100 - burst_percent[0], pktnb_stats[1]);
734                 return;
735         }
736         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
737         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
738         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
739                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
740                 return;
741         }
742         printf(" + %d%% of %d pkts + %d%% of others]\n",
743                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
744 }
745 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
746
747 static void
748 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
749 {
750         struct rte_port *port;
751         uint8_t i;
752
753         static const char *fwd_stats_border = "----------------------";
754
755         port = &ports[port_id];
756         printf("\n  %s Forward statistics for port %-2d %s\n",
757                fwd_stats_border, port_id, fwd_stats_border);
758
759         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
760                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
761                        "%-"PRIu64"\n",
762                        stats->ipackets, stats->ierrors,
763                        (uint64_t) (stats->ipackets + stats->ierrors));
764
765                 if (cur_fwd_eng == &csum_fwd_engine)
766                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
767                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
768
769                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
770                        "%-"PRIu64"\n",
771                        stats->opackets, port->tx_dropped,
772                        (uint64_t) (stats->opackets + port->tx_dropped));
773
774                 if (stats->rx_nombuf > 0)
775                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
776
777         }
778         else {
779                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
780                        "%14"PRIu64"\n",
781                        stats->ipackets, stats->ierrors,
782                        (uint64_t) (stats->ipackets + stats->ierrors));
783
784                 if (cur_fwd_eng == &csum_fwd_engine)
785                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
786                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
787
788                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
789                        "%14"PRIu64"\n",
790                        stats->opackets, port->tx_dropped,
791                        (uint64_t) (stats->opackets + port->tx_dropped));
792
793                 if (stats->rx_nombuf > 0)
794                         printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
795         }
796
797         /* Display statistics of XON/XOFF pause frames, if any. */
798         if ((stats->tx_pause_xon  | stats->rx_pause_xon |
799              stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
800                 printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
801                        stats->rx_pause_xoff, stats->rx_pause_xon);
802                 printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
803                        stats->tx_pause_xoff, stats->tx_pause_xon);
804         }
805
806 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
807         if (port->rx_stream)
808                 pkt_burst_stats_display("RX",
809                         &port->rx_stream->rx_burst_stats);
810         if (port->tx_stream)
811                 pkt_burst_stats_display("TX",
812                         &port->tx_stream->tx_burst_stats);
813 #endif
814         /* stats fdir */
815         if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
816                 printf("  Fdirmiss:%14"PRIu64"    Fdirmatch:%14"PRIu64"\n",
817                        stats->fdirmiss,
818                        stats->fdirmatch);
819
820         if (port->rx_queue_stats_mapping_enabled) {
821                 printf("\n");
822                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
823                         printf("  Stats reg %2d RX-packets:%14"PRIu64
824                                "     RX-errors:%14"PRIu64
825                                "    RX-bytes:%14"PRIu64"\n",
826                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
827                 }
828                 printf("\n");
829         }
830         if (port->tx_queue_stats_mapping_enabled) {
831                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
832                         printf("  Stats reg %2d TX-packets:%14"PRIu64
833                                "                                 TX-bytes:%14"PRIu64"\n",
834                                i, stats->q_opackets[i], stats->q_obytes[i]);
835                 }
836         }
837
838         printf("  %s--------------------------------%s\n",
839                fwd_stats_border, fwd_stats_border);
840 }
841
842 static void
843 fwd_stream_stats_display(streamid_t stream_id)
844 {
845         struct fwd_stream *fs;
846         static const char *fwd_top_stats_border = "-------";
847
848         fs = fwd_streams[stream_id];
849         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
850             (fs->fwd_dropped == 0))
851                 return;
852         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
853                "TX Port=%2d/Queue=%2d %s\n",
854                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
855                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
856         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
857                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
858
859         /* If the checksum forwarding engine is running, show bad checksum counters. */
860         if (cur_fwd_eng == &csum_fwd_engine) {
861                 printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
862                        "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
863         }
864
865 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
866         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
867         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
868 #endif
869 }
870
871 static void
872 flush_fwd_rx_queues(void)
873 {
874         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
875         portid_t  rxp;
876         portid_t port_id;
877         queueid_t rxq;
878         uint16_t  nb_rx;
879         uint16_t  i;
880         uint8_t   j;
881
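        /*
         * Drain every RX queue of every forwarding port until it is empty.
         * Do it twice, with a short pause in between, to also catch packets
         * that were still in flight during the first pass.
         */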
882         for (j = 0; j < 2; j++) {
883                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
884                         for (rxq = 0; rxq < nb_rxq; rxq++) {
885                                 port_id = fwd_ports_ids[rxp];
886                                 do {
887                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
888                                                 pkts_burst, MAX_PKT_BURST);
889                                         for (i = 0; i < nb_rx; i++)
890                                                 rte_pktmbuf_free(pkts_burst[i]);
891                                 } while (nb_rx > 0);
892                         }
893                 }
894                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
895         }
896 }
897
898 static void
899 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
900 {
901         struct fwd_stream **fsm;
902         streamid_t nb_fs;
903         streamid_t sm_id;
904
905         fsm = &fwd_streams[fc->stream_idx];
906         nb_fs = fc->stream_nb;
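        /*
         * Run the forwarding function over this lcore's slice of the
         * fwd_streams[] array until the lcore's stopped flag is set
         * (normally by stop_packet_forwarding()).
         */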
907         do {
908                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
909                         (*pkt_fwd)(fsm[sm_id]);
910         } while (! fc->stopped);
911 }
912
913 static int
914 start_pkt_forward_on_core(void *fwd_arg)
915 {
916         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
917                              cur_fwd_config.fwd_eng->packet_fwd);
918         return 0;
919 }
920
921 /*
922  * Run the TXONLY packet forwarding engine to send a single burst of packets.
923  * Used to start communication flows in network loopback test configurations.
924  */
925 static int
926 run_one_txonly_burst_on_core(void *fwd_arg)
927 {
928         struct fwd_lcore *fwd_lc;
929         struct fwd_lcore tmp_lcore;
930
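        /*
         * Work on a copy of the lcore context with the stopped flag already
         * set, so run_pkt_fwd_on_lcore() executes exactly one pass (one
         * burst per stream) and then returns.
         */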
931         fwd_lc = (struct fwd_lcore *) fwd_arg;
932         tmp_lcore = *fwd_lc;
933         tmp_lcore.stopped = 1;
934         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
935         return 0;
936 }
937
938 /*
939  * Launch packet forwarding:
940  *     - Setup per-port forwarding context.
941  *     - launch logical cores with their forwarding configuration.
942  */
943 static void
944 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
945 {
946         port_fwd_begin_t port_fwd_begin;
947         unsigned int i;
948         unsigned int lc_id;
949         int diag;
950
951         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
952         if (port_fwd_begin != NULL) {
953                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
954                         (*port_fwd_begin)(fwd_ports_ids[i]);
955         }
956         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
957                 lc_id = fwd_lcores_cpuids[i];
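                /*
                 * In interactive mode the master lcore keeps running the
                 * command line, so forwarding is only launched on the
                 * other configured lcores.
                 */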
958                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
959                         fwd_lcores[i]->stopped = 0;
960                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
961                                                      fwd_lcores[i], lc_id);
962                         if (diag != 0)
963                                 printf("launch lcore %u failed - diag=%d\n",
964                                        lc_id, diag);
965                 }
966         }
967 }
968
969 /*
970  * Launch packet forwarding configuration.
971  */
972 void
973 start_packet_forwarding(int with_tx_first)
974 {
975         port_fwd_begin_t port_fwd_begin;
976         port_fwd_end_t  port_fwd_end;
977         struct rte_port *port;
978         unsigned int i;
979         portid_t   pt_id;
980         streamid_t sm_id;
981
982         if (all_ports_started() == 0) {
983                 printf("Not all ports were started\n");
984                 return;
985         }
986         if (test_done == 0) {
987                 printf("Packet forwarding already started\n");
988                 return;
989         }
990         if (dcb_test) {
991                 for (i = 0; i < nb_fwd_ports; i++) {
992                         pt_id = fwd_ports_ids[i];
993                         port = &ports[pt_id];
994                         if (!port->dcb_flag) {
995                                 printf("In DCB mode, all forwarding ports must "
996                                        "be configured in this mode.\n");
997                                 return;
998                         }
999                 }
1000                 if (nb_fwd_lcores == 1) {
1001                         printf("In DCB mode, the number of forwarding cores "
1002                                "should be larger than 1.\n");
1003                         return;
1004                 }
1005         }
1006         test_done = 0;
1007
1008         if (!no_flush_rx)
1009                 flush_fwd_rx_queues();
1010
1011         fwd_config_setup();
1012         rxtx_config_display();
1013
1014         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1015                 pt_id = fwd_ports_ids[i];
1016                 port = &ports[pt_id];
1017                 rte_eth_stats_get(pt_id, &port->stats);
1018                 port->tx_dropped = 0;
1019
1020                 map_port_queue_stats_mapping_registers(pt_id, port);
1021         }
1022         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1023                 fwd_streams[sm_id]->rx_packets = 0;
1024                 fwd_streams[sm_id]->tx_packets = 0;
1025                 fwd_streams[sm_id]->fwd_dropped = 0;
1026                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1027                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1028
1029 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1030                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1031                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1032                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1033                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1034 #endif
1035 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1036                 fwd_streams[sm_id]->core_cycles = 0;
1037 #endif
1038         }
1039         if (with_tx_first) {
1040                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1041                 if (port_fwd_begin != NULL) {
1042                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1043                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1044                 }
1045                 launch_packet_forwarding(run_one_txonly_burst_on_core);
1046                 rte_eal_mp_wait_lcore();
1047                 port_fwd_end = tx_only_engine.port_fwd_end;
1048                 if (port_fwd_end != NULL) {
1049                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1050                                 (*port_fwd_end)(fwd_ports_ids[i]);
1051                 }
1052         }
1053         launch_packet_forwarding(start_pkt_forward_on_core);
1054 }
1055
1056 void
1057 stop_packet_forwarding(void)
1058 {
1059         struct rte_eth_stats stats;
1060         struct rte_port *port;
1061         port_fwd_end_t  port_fwd_end;
1062         int i;
1063         portid_t   pt_id;
1064         streamid_t sm_id;
1065         lcoreid_t  lc_id;
1066         uint64_t total_recv;
1067         uint64_t total_xmit;
1068         uint64_t total_rx_dropped;
1069         uint64_t total_tx_dropped;
1070         uint64_t total_rx_nombuf;
1071         uint64_t tx_dropped;
1072         uint64_t rx_bad_ip_csum;
1073         uint64_t rx_bad_l4_csum;
1074 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1075         uint64_t fwd_cycles;
1076 #endif
1077         static const char *acc_stats_border = "+++++++++++++++";
1078
1079         if (all_ports_started() == 0) {
1080                 printf("Not all ports were started\n");
1081                 return;
1082         }
1083         if (test_done) {
1084                 printf("Packet forwarding not started\n");
1085                 return;
1086         }
1087         printf("Telling cores to stop...");
1088         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1089                 fwd_lcores[lc_id]->stopped = 1;
1090         printf("\nWaiting for lcores to finish...\n");
1091         rte_eal_mp_wait_lcore();
1092         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1093         if (port_fwd_end != NULL) {
1094                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1095                         pt_id = fwd_ports_ids[i];
1096                         (*port_fwd_end)(pt_id);
1097                 }
1098         }
1099 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1100         fwd_cycles = 0;
1101 #endif
1102         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1103                 if (cur_fwd_config.nb_fwd_streams >
1104                     cur_fwd_config.nb_fwd_ports) {
1105                         fwd_stream_stats_display(sm_id);
1106                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1107                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1108                 } else {
1109                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1110                                 fwd_streams[sm_id];
1111                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1112                                 fwd_streams[sm_id];
1113                 }
1114                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1115                 tx_dropped = (uint64_t) (tx_dropped +
1116                                          fwd_streams[sm_id]->fwd_dropped);
1117                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1118
1119                 rx_bad_ip_csum =
1120                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1121                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1122                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1123                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1124                                                         rx_bad_ip_csum;
1125
1126                 rx_bad_l4_csum =
1127                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1128                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1129                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1130                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1131                                                         rx_bad_l4_csum;
1132
1133 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1134                 fwd_cycles = (uint64_t) (fwd_cycles +
1135                                          fwd_streams[sm_id]->core_cycles);
1136 #endif
1137         }
1138         total_recv = 0;
1139         total_xmit = 0;
1140         total_rx_dropped = 0;
1141         total_tx_dropped = 0;
1142         total_rx_nombuf  = 0;
1143         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1144                 pt_id = fwd_ports_ids[i];
1145
1146                 port = &ports[pt_id];
1147                 rte_eth_stats_get(pt_id, &stats);
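                /*
                 * port->stats holds the counters snapshotted in
                 * start_packet_forwarding(); subtracting them yields the
                 * statistics for this forwarding session only.
                 */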
1148                 stats.ipackets -= port->stats.ipackets;
1149                 port->stats.ipackets = 0;
1150                 stats.opackets -= port->stats.opackets;
1151                 port->stats.opackets = 0;
1152                 stats.ibytes   -= port->stats.ibytes;
1153                 port->stats.ibytes = 0;
1154                 stats.obytes   -= port->stats.obytes;
1155                 port->stats.obytes = 0;
1156                 stats.ierrors  -= port->stats.ierrors;
1157                 port->stats.ierrors = 0;
1158                 stats.oerrors  -= port->stats.oerrors;
1159                 port->stats.oerrors = 0;
1160                 stats.rx_nombuf -= port->stats.rx_nombuf;
1161                 port->stats.rx_nombuf = 0;
1162                 stats.fdirmatch -= port->stats.fdirmatch;
1163                 port->stats.fdirmatch = 0;
1164                 stats.fdirmiss -= port->stats.fdirmiss;
1165                 port->stats.fdirmiss = 0;
1166
1167                 total_recv += stats.ipackets;
1168                 total_xmit += stats.opackets;
1169                 total_rx_dropped += stats.ierrors;
1170                 total_tx_dropped += port->tx_dropped;
1171                 total_rx_nombuf  += stats.rx_nombuf;
1172
1173                 fwd_port_stats_display(pt_id, &stats);
1174         }
1175         printf("\n  %s Accumulated forward statistics for all ports"
1176                "%s\n",
1177                acc_stats_border, acc_stats_border);
1178         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1179                "%-"PRIu64"\n"
1180                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1181                "%-"PRIu64"\n",
1182                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1183                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1184         if (total_rx_nombuf > 0)
1185                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1186         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1187                "%s\n",
1188                acc_stats_border, acc_stats_border);
1189 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1190         if (total_recv > 0)
1191                 printf("\n  CPU cycles/packet=%u (total cycles="
1192                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1193                        (unsigned int)(fwd_cycles / total_recv),
1194                        fwd_cycles, total_recv);
1195 #endif
1196         printf("\nDone.\n");
1197         test_done = 1;
1198 }
1199
1200 static int
1201 all_ports_started(void)
1202 {
1203         portid_t pi;
1204         struct rte_port *port;
1205
1206         for (pi = 0; pi < nb_ports; pi++) {
1207                 port = &ports[pi];
1208                 /* Check if there is a port which is not started */
1209                 if (port->port_status != RTE_PORT_STARTED)
1210                         return 0;
1211         }
1212
1213         /* All ports are started. */
1214         return 1;
1215 }
1216
1217 int
1218 start_port(portid_t pid)
1219 {
1220         int diag, need_check_link_status = 0;
1221         portid_t pi;
1222         queueid_t qi;
1223         struct rte_port *port;
1224
1225         if (test_done == 0) {
1226                 printf("Please stop forwarding first\n");
1227                 return -1;
1228         }
1229
1230         if (init_fwd_streams() < 0) {
1231                 printf("Fail from init_fwd_streams()\n");
1232                 return -1;
1233         }
1234
1235         if (dcb_config)
1236                 dcb_test = 1;
1237         for (pi = 0; pi < nb_ports; pi++) {
1238                 if (pid < nb_ports && pid != pi)
1239                         continue;
1240
1241                 port = &ports[pi];
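                /*
                 * Atomically move the port from STOPPED to HANDLING so that
                 * concurrent start/stop/close requests cannot reconfigure
                 * the same port at the same time.
                 */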
1242                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1243                                                  RTE_PORT_HANDLING) == 0) {
1244                         printf("Port %d is now not stopped\n", pi);
1245                         continue;
1246                 }
1247
1248                 if (port->need_reconfig > 0) {
1249                         port->need_reconfig = 0;
1250
1251                         printf("Configuring Port %d (socket %d)\n", pi,
1252                                         port->socket_id);
1253                         /* configure port */
1254                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1255                                                 &(port->dev_conf));
1256                         if (diag != 0) {
1257                                 if (rte_atomic16_cmpset(&(port->port_status),
1258                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1259                                         printf("Port %d can not be set back "
1260                                                         "to stopped\n", pi);
1261                                 printf("Fail to configure port %d\n", pi);
1262                                 /* try to reconfigure port next time */
1263                                 port->need_reconfig = 1;
1264                                 return -1;
1265                         }
1266                 }
1267                 if (port->need_reconfig_queues > 0) {
1268                         port->need_reconfig_queues = 0;
1269                         /* setup tx queues */
1270                         for (qi = 0; qi < nb_txq; qi++) {
1271                                 if ((numa_support) &&
1272                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1273                                         diag = rte_eth_tx_queue_setup(pi, qi,
1274                                                 nb_txd, txring_numa[pi],
1275                                                 &(port->tx_conf));
1276                                 else
1277                                         diag = rte_eth_tx_queue_setup(pi, qi,
1278                                                 nb_txd, port->socket_id,
1279                                                 &(port->tx_conf));
1280
1281                                 if (diag == 0)
1282                                         continue;
1283
1284                                 /* Fail to setup tx queue, return */
1285                                 if (rte_atomic16_cmpset(&(port->port_status),
1286                                                         RTE_PORT_HANDLING,
1287                                                         RTE_PORT_STOPPED) == 0)
1288                                         printf("Port %d can not be set back "
1289                                                         "to stopped\n", pi);
1290                                 printf("Fail to configure port %d tx queues\n", pi);
1291                                 /* try to reconfigure queues next time */
1292                                 port->need_reconfig_queues = 1;
1293                                 return -1;
1294                         }
1295                         /* setup rx queues */
1296                         for (qi = 0; qi < nb_rxq; qi++) {
1297                                 if ((numa_support) &&
1298                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1299                                         struct rte_mempool *mp =
1300                                                 mbuf_pool_find(rxring_numa[pi]);
1301                                         if (mp == NULL) {
1302                                                 printf("Failed to setup RX queue: "
1303                                                         "no mempool allocation "
1304                                                         "on socket %d\n",
1305                                                         rxring_numa[pi]);
1306                                                 return -1;
1307                                         }
1308
1309                                         diag = rte_eth_rx_queue_setup(pi, qi,
1310                                              nb_rxd, rxring_numa[pi],
1311                                              &(port->rx_conf), mp);
1312                                 }
1313                                 else
1314                                         diag = rte_eth_rx_queue_setup(pi, qi,
1315                                              nb_rxd, port->socket_id,
1316                                              &(port->rx_conf),
1317                                              mbuf_pool_find(port->socket_id));
1318
1319                                 if (diag == 0)
1320                                         continue;
1321
1322
1323                                 /* Fail to setup rx queue, return */
1324                                 if (rte_atomic16_cmpset(&(port->port_status),
1325                                                         RTE_PORT_HANDLING,
1326                                                         RTE_PORT_STOPPED) == 0)
1327                                         printf("Port %d can not be set back "
1328                                                         "to stopped\n", pi);
1329                                 printf("Fail to configure port %d rx queues\n", pi);
1330                                 /* try to reconfigure queues next time */
1331                                 port->need_reconfig_queues = 1;
1332                                 return -1;
1333                         }
1334                 }
1335                 /* start port */
1336                 if (rte_eth_dev_start(pi) < 0) {
1337                         printf("Fail to start port %d\n", pi);
1338
1339                         /* Fail to setup rx queue, return */
1340                         if (rte_atomic16_cmpset(&(port->port_status),
1341                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1342                                 printf("Port %d can not be set back to "
1343                                                         "stopped\n", pi);
1344                         continue;
1345                 }
1346
1347                 if (rte_atomic16_cmpset(&(port->port_status),
1348                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1349                         printf("Port %d cannot be set to started\n", pi);
1350
1351                 /* at least one port started, need checking link status */
1352                 need_check_link_status = 1;
1353         }
1354
1355         if (need_check_link_status)
1356                 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1357         else
1358                 printf("Please stop the ports first\n");
1359
1360         printf("Done\n");
1361         return 0;
1362 }
1363
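/*
 * Stop the port given by "pid", or every probed port when pid is out of
 * range (RTE_PORT_ALL). A port is first moved atomically from STARTED to
 * HANDLING, so ports that are not started are silently skipped.
 */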
1364 void
1365 stop_port(portid_t pid)
1366 {
1367         portid_t pi;
1368         struct rte_port *port;
1369         int need_check_link_status = 0;
1370
1371         if (test_done == 0) {
1372                 printf("Please stop forwarding first\n");
1373                 return;
1374         }
1375         if (dcb_test) {
1376                 dcb_test = 0;
1377                 dcb_config = 0;
1378         }
1379         printf("Stopping ports...\n");
1380
1381         for (pi = 0; pi < nb_ports; pi++) {
1382                 if (pid < nb_ports && pid != pi)
1383                         continue;
1384
1385                 port = &ports[pi];
1386                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1387                                                 RTE_PORT_HANDLING) == 0)
1388                         continue;
1389
1390                 rte_eth_dev_stop(pi);
1391
1392                 if (rte_atomic16_cmpset(&(port->port_status),
1393                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1394                         printf("Port %d cannot be set to stopped\n", pi);
1395                 need_check_link_status = 1;
1396         }
1397         if (need_check_link_status)
1398                 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1399
1400         printf("Done\n");
1401 }
1402
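/*
 * Close the port given by "pid", or every probed port when pid is out of
 * range. Only ports currently in the STOPPED state are closed; their
 * status ends up as RTE_PORT_CLOSED.
 */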
1403 void
1404 close_port(portid_t pid)
1405 {
1406         portid_t pi;
1407         struct rte_port *port;
1408
1409         if (test_done == 0) {
1410                 printf("Please stop forwarding first\n");
1411                 return;
1412         }
1413
1414         printf("Closing ports...\n");
1415
1416         for (pi = 0; pi < nb_ports; pi++) {
1417                 if (pid < nb_ports && pid != pi)
1418                         continue;
1419
1420                 port = &ports[pi];
1421                 if (rte_atomic16_cmpset(&(port->port_status),
1422                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1423                         printf("Port %d is not stopped, cannot close it\n", pi);
1424                         continue;
1425                 }
1426
1427                 rte_eth_dev_close(pi);
1428
1429                 if (rte_atomic16_cmpset(&(port->port_status),
1430                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1431                         printf("Port %d cannot be set to closed\n", pi);
1432         }
1433
1434         printf("Done\n");
1435 }
1436
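/* Return 1 when every probed port is in the STOPPED state, 0 otherwise. */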
1437 int
1438 all_ports_stopped(void)
1439 {
1440         portid_t pi;
1441         struct rte_port *port;
1442
1443         for (pi = 0; pi < nb_ports; pi++) {
1444                 port = &ports[pi];
1445                 if (port->port_status != RTE_PORT_STOPPED)
1446                         return 0;
1447         }
1448
1449         return 1;
1450 }
1451
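/* Close every probed port; intended to be called when testpmd terminates. */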
1452 void
1453 pmd_test_exit(void)
1454 {
1455         portid_t pt_id;
1456
1457         for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1458                 printf("Closing port %d...", pt_id);
1459                 fflush(stdout);
1460                 rte_eth_dev_close(pt_id);
1461                 printf("done\n");
1462         }
1463         printf("bye...\n");
1464 }
1465
1466 typedef void (*cmd_func_t)(void);
1467 struct pmd_test_command {
1468         const char *cmd_name;
1469         cmd_func_t cmd_func;
1470 };
1471
1472 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1473
1474 /* Check the link status of all ports for up to 9 s, then print the final status */
1475 static void
1476 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1477 {
1478 #define CHECK_INTERVAL 100 /* 100ms */
1479 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1480         uint8_t portid, count, all_ports_up, print_flag = 0;
1481         struct rte_eth_link link;
1482
1483         printf("Checking link statuses...\n");
1484         fflush(stdout);
1485         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1486                 all_ports_up = 1;
1487                 for (portid = 0; portid < port_num; portid++) {
1488                         if ((port_mask & (1 << portid)) == 0)
1489                                 continue;
1490                         memset(&link, 0, sizeof(link));
1491                         rte_eth_link_get_nowait(portid, &link);
1492                         /* print link status if flag set */
1493                         if (print_flag == 1) {
1494                                 if (link.link_status)
1495                                         printf("Port %d Link Up - speed %u "
1496                                                 "Mbps - %s\n", (uint8_t)portid,
1497                                                 (unsigned)link.link_speed,
1498                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1499                                         ("full-duplex") : ("half-duplex"));
1500                                 else
1501                                         printf("Port %d Link Down\n",
1502                                                 (uint8_t)portid);
1503                                 continue;
1504                         }
1505                         /* clear all_ports_up flag if any link down */
1506                         if (link.link_status == 0) {
1507                                 all_ports_up = 0;
1508                                 break;
1509                         }
1510                 }
1511                 /* after finally printing all link status, get out */
1512                 if (print_flag == 1)
1513                         break;
1514
1515                 if (all_ports_up == 0) {
1516                         fflush(stdout);
1517                         rte_delay_ms(CHECK_INTERVAL);
1518                 }
1519
1520                 /* set the print_flag if all ports up or timeout */
1521                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1522                         print_flag = 1;
1523                 }
1524         }
1525 }
1526
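/*
 * Program the user-supplied TX queue to statistics-counter mappings of a
 * port into the device. Returns the driver error code of the first
 * rejected mapping, or 0 on success.
 */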
1527 static int
1528 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1529 {
1530         uint16_t i;
1531         int diag;
1532         uint8_t mapping_found = 0;
1533
1534         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1535                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1536                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1537                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1538                                         tx_queue_stats_mappings[i].queue_id,
1539                                         tx_queue_stats_mappings[i].stats_counter_id);
1540                         if (diag != 0)
1541                                 return diag;
1542                         mapping_found = 1;
1543                 }
1544         }
1545         if (mapping_found)
1546                 port->tx_queue_stats_mapping_enabled = 1;
1547         return 0;
1548 }
1549
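/* RX counterpart of set_tx_queue_stats_mapping_registers() above. */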
1550 static int
1551 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1552 {
1553         uint16_t i;
1554         int diag;
1555         uint8_t mapping_found = 0;
1556
1557         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1558                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1559                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1560                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1561                                         rx_queue_stats_mappings[i].queue_id,
1562                                         rx_queue_stats_mappings[i].stats_counter_id);
1563                         if (diag != 0)
1564                                 return diag;
1565                         mapping_found = 1;
1566                 }
1567         }
1568         if (mapping_found)
1569                 port->rx_queue_stats_mapping_enabled = 1;
1570         return 0;
1571 }
1572
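/*
 * Apply both TX and RX queue statistics mappings to a port. -ENOTSUP from
 * the driver merely disables the feature; any other error is fatal.
 */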
1573 static void
1574 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1575 {
1576         int diag = 0;
1577
1578         diag = set_tx_queue_stats_mapping_registers(pi, port);
1579         if (diag != 0) {
1580                 if (diag == -ENOTSUP) {
1581                         port->tx_queue_stats_mapping_enabled = 0;
1582                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1583                 }
1584                 else
1585                         rte_exit(EXIT_FAILURE,
1586                                         "set_tx_queue_stats_mapping_registers "
1587                                         "failed for port id=%d diag=%d\n",
1588                                         pi, diag);
1589         }
1590
1591         diag = set_rx_queue_stats_mapping_registers(pi, port);
1592         if (diag != 0) {
1593                 if (diag == -ENOTSUP) {
1594                         port->rx_queue_stats_mapping_enabled = 0;
1595                         printf("RX queue stats mapping not supported port id=%d\n", pi);
1596                 }
1597                 else
1598                         rte_exit(EXIT_FAILURE,
1599                                         "set_rx_queue_stats_mapping_registers "
1600                                         "failed for port id=%d diag=%d\n",
1601                                         pi, diag);
1602         }
1603 }
1604
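/*
 * Build the default configuration of every probed port from the global
 * testpmd settings: RX mode, flow director, RSS (only when several RX
 * queues are configured), queue thresholds and the port MAC address.
 */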
1605 void
1606 init_port_config(void)
1607 {
1608         portid_t pid;
1609         struct rte_port *port;
1610
1611         for (pid = 0; pid < nb_ports; pid++) {
1612                 port = &ports[pid];
1613                 port->dev_conf.rxmode = rx_mode;
1614                 port->dev_conf.fdir_conf = fdir_conf;
1615                 if (nb_rxq > 1) {
1616                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1617                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1618                 } else {
1619                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1620                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1621                 }
1622
1623                 /* In SR-IOV mode, RSS mode is not available */
1624                 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1625                         if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1626                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1627                         else
1628                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;        
1629                 }
1630
1631                 port->rx_conf.rx_thresh = rx_thresh;
1632                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1633                 port->rx_conf.rx_drop_en = rx_drop_en;
1634                 port->tx_conf.tx_thresh = tx_thresh;
1635                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1636                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1637                 port->tx_conf.txq_flags = txq_flags;
1638
1639                 rte_eth_macaddr_get(pid, &port->eth_addr);
1640
1641                 map_port_queue_stats_mapping_registers(pid, port);
1642 #ifdef RTE_NIC_BYPASS
1643                 rte_eth_dev_bypass_init(pid);
1644 #endif
1645         }
1646 }
1647
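/* VLAN tags used for the VMDQ+DCB pool mapping and the DCB VLAN filter. */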
1648 const uint16_t vlan_tags[] = {
1649                 0,  1,  2,  3,  4,  5,  6,  7,
1650                 8,  9, 10, 11,  12, 13, 14, 15,
1651                 16, 17, 18, 19, 20, 21, 22, 23,
1652                 24, 25, 26, 27, 28, 29, 30, 31
1653 };
1654
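/*
 * Fill an rte_eth_conf for DCB operation, either combined with VMDQ
 * (DCB_VT_ENABLED) or as plain DCB, using the vlan_tags table above.
 */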
1655 static int
1656 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1657 {
1658         uint8_t i;
1659  
1660         /*
1661          * Builds up the correct configuration for dcb+vt based on the vlan tags array
1662          * given above, and the number of traffic classes available for use.
1663          */
1664         if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1665                 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1666                 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1667  
1668                 /* VMDQ+DCB RX and TX configurations */
1669                 vmdq_rx_conf.enable_default_pool = 0;
1670                 vmdq_rx_conf.default_pool = 0;
1671                 vmdq_rx_conf.nb_queue_pools =
1672                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1673                 vmdq_tx_conf.nb_queue_pools =
1674                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1675  
1676                 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1677                 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1678                         vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1679                         vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1680                 }
1681                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1682                         vmdq_rx_conf.dcb_queue[i] = i;
1683                         vmdq_tx_conf.dcb_queue[i] = i;
1684                 }
1685  
1686                 /* Set the multi-queue DCB mode for RX and TX */
1687                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1688                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1689                 if (dcb_conf->pfc_en)
1690                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1691                 else
1692                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1693  
1694                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1695                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1696                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1697                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1698         }
1699         else {
1700                 struct rte_eth_dcb_rx_conf rx_conf;
1701                 struct rte_eth_dcb_tx_conf tx_conf;
1702  
1703                 /* queue mapping configuration of DCB RX and TX */
1704                 if (dcb_conf->num_tcs == ETH_4_TCS)
1705                         dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1706                 else
1707                         dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1708  
1709                 rx_conf.nb_tcs = dcb_conf->num_tcs;
1710                 tx_conf.nb_tcs = dcb_conf->num_tcs;
1711  
1712                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1713                         rx_conf.dcb_queue[i] = i;
1714                         tx_conf.dcb_queue[i] = i;
1715                 }
1716                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1717                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1718                 if (dcb_conf->pfc_en)
1719                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1720                 else
1721                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1722                  
1723                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1724                                 sizeof(struct rte_eth_dcb_rx_conf)));
1725                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1726                                 sizeof(struct rte_eth_dcb_tx_conf)));
1727         }
1728
1729         return 0;
1730 }
1731
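/*
 * Switch one port to DCB mode: force 128 RX/TX queues, build the DCB
 * device configuration, enable hardware VLAN filtering and register every
 * VLAN tag of the vlan_tags table.
 */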
1732 int
1733 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1734 {
1735         struct rte_eth_conf port_conf;
1736         struct rte_port *rte_port;
1737         int retval;
1738         uint16_t nb_vlan;
1739         uint16_t i;
1740  
1741         /* rxq and txq configuration in dcb mode */
1742         nb_rxq = 128;
1743         nb_txq = 128;
1744         rx_free_thresh = 64;
1745  
1746         memset(&port_conf,0,sizeof(struct rte_eth_conf));
1747         /* Enter DCB configuration status */
1748         dcb_config = 1;
1749  
1750         nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1751         /* Set the DCB configuration, with or without virtualization (VT) */
1752         retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1753         if (retval < 0)
1754                 return retval;
1755  
1756         rte_port = &ports[pid];
1757         memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1758  
1759         rte_port->rx_conf.rx_thresh = rx_thresh;
1760         rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1761         rte_port->tx_conf.tx_thresh = tx_thresh;
1762         rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1763         rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1764         /* VLAN filter */
1765         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1766         for (i = 0; i < nb_vlan; i++){
1767                 rx_vft_set(pid, vlan_tags[i], 1);
1768         }
1769  
1770         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1771         map_port_queue_stats_mapping_registers(pid, rte_port);
1772
1773         rte_port->dcb_flag = 1;
1774  
1775         return 0;
1776 }
1777
1778 #ifdef RTE_EXEC_ENV_BAREMETAL
1779 #define main _main
1780 #endif
1781
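/*
 * testpmd entry point: initialise the EAL and the PMDs, probe the PCI
 * devices, parse the testpmd command line, configure and start all ports,
 * then either enter the interactive prompt or start forwarding until the
 * user presses enter.
 */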
1782 int
1783 main(int argc, char** argv)
1784 {
1785         int  diag;
1786         uint8_t port_id;
1787
1788         diag = rte_eal_init(argc, argv);
1789         if (diag < 0)
1790                 rte_panic("Cannot init EAL\n");
1791
1792         if (rte_pmd_init_all())
1793                 rte_panic("Cannot init PMD\n");
1794
1795         if (rte_eal_pci_probe())
1796                 rte_panic("Cannot probe PCI\n");
1797
1798         nb_ports = (portid_t) rte_eth_dev_count();
1799         if (nb_ports == 0)
1800                 rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1801                           "check that "
1802                           "CONFIG_RTE_LIBRTE_IGB_PMD=y, "
1803                           "CONFIG_RTE_LIBRTE_EM_PMD=y and "
1804                           "CONFIG_RTE_LIBRTE_IXGBE_PMD=y are set in your "
1805                           "configuration file\n");
1806
1807         set_def_fwd_config();
1808         if (nb_lcores == 0)
1809                 rte_panic("Empty set of forwarding logical cores - check the "
1810                           "core mask supplied in the command parameters\n");
1811
1812         argc -= diag;
1813         argv += diag;
1814         if (argc > 1)
1815                 launch_args_parse(argc, argv);
1816
1817         if (nb_rxq > nb_txq)
1818                 printf("Warning: nb_rxq=%d enables RSS configuration, "
1819                        "but nb_txq=%d will prevent it from being fully tested.\n",
1820                        nb_rxq, nb_txq);
1821
1822         init_config();
1823         if (start_port(RTE_PORT_ALL) != 0)
1824                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
1825
1826         /* set all ports to promiscuous mode by default */
1827         for (port_id = 0; port_id < nb_ports; port_id++)
1828                 rte_eth_promiscuous_enable(port_id);
1829
1830 #ifdef RTE_LIBRTE_CMDLINE
1831         if (interactive == 1)
1832                 prompt();
1833         else
1834 #endif
1835         {
1836                 char c;
1837                 int rc;
1838
1839                 printf("Interactive mode disabled, starting packet forwarding\n");
1840                 start_packet_forwarding(0);
1841                 printf("Press enter to exit\n");
1842                 rc = read(0, &c, 1);
1843                 if (rc < 0)
1844                         return 1;
1845         }
1846
1847         return 0;
1848 }