app/testpmd: disable RSS when in SRIOV
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  * 
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  * 
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  * 
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  * 
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81
uint16_t verbose_level = 0; /**< Silent by default. */

/* Use the master core for the interactive command line? (0 = batch mode) */
uint8_t interactive = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;        /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines. Selectable at run time with "set fwd <engine>".
 */
struct fwd_engine * fwd_engines[] = {
        &io_fwd_engine,
        &mac_fwd_engine,
        &mac_retry_fwd_engine,
        &rx_only_engine,
        &tx_only_engine,
        &csum_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
        &ieee1588_fwd_engine,
#endif
        NULL, /* sentinel: marks the end of the engine list */
};
155
struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
        TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */

/* Current configuration is in DCB or not; 0 means it is not in DCB mode. */
uint8_t dcb_config = 0;

/* Whether the dcb is in testing status */
uint8_t dcb_test = 0;

/* DCB on and VT on mapping is default */
enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

/*
 * Configurable values of RX and TX ring threshold registers.
 */
#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
#define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
#define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */

#define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
#define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
#define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */

struct rte_eth_thresh rx_thresh = {
        .pthresh = RX_PTHRESH,
        .hthresh = RX_HTHRESH,
        .wthresh = RX_WTHRESH,
};

struct rte_eth_thresh tx_thresh = {
        .pthresh = TX_PTHRESH,
        .hthresh = TX_HTHRESH,
        .wthresh = TX_WTHRESH,
};

/*
 * Configurable value of RX free threshold.
 */
uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */

/*
 * Configurable value of RX drop enable.
 */
uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */

/*
 * Configurable value of TX free threshold.
 */
uint16_t tx_free_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX RS bit threshold.
 */
uint16_t tx_rs_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX queue flags.
 */
uint32_t txq_flags = 0; /* No flags set. */

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
        .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
        .split_hdr_size = 0,
        .header_split   = 0, /**< Header Split disabled. */
        .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
        .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
        .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
        .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
        .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
        .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

/* Flow Director: disabled by default. */
struct rte_fdir_conf fdir_conf = {
        .mode = RTE_FDIR_MODE_NONE,
        .pballoc = RTE_FDIR_PBALLOC_64K,
        .status = RTE_FDIR_REPORT_STATUS,
        .flexbytes_offset = 0x6,
        .drop_queue = 127,
};

static volatile int test_done = 1; /* stop packet forwarding when set to 1. */

/* Queue -> stats register mapping tables (see "set stats-mask" commands). */
struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
314
315 /*
316  * Setup default configuration.
317  */
318 static void
319 set_default_fwd_lcores_config(void)
320 {
321         unsigned int i;
322         unsigned int nb_lc;
323
324         nb_lc = 0;
325         for (i = 0; i < RTE_MAX_LCORE; i++) {
326                 if (! rte_lcore_is_enabled(i))
327                         continue;
328                 if (i == rte_get_master_lcore())
329                         continue;
330                 fwd_lcores_cpuids[nb_lc++] = i;
331         }
332         nb_lcores = (lcoreid_t) nb_lc;
333         nb_cfg_lcores = nb_lcores;
334         nb_fwd_lcores = 1;
335 }
336
337 static void
338 set_def_peer_eth_addrs(void)
339 {
340         portid_t i;
341
342         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
343                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
344                 peer_eth_addrs[i].addr_bytes[5] = i;
345         }
346 }
347
348 static void
349 set_default_fwd_ports_config(void)
350 {
351         portid_t pt_id;
352
353         for (pt_id = 0; pt_id < nb_ports; pt_id++)
354                 fwd_ports_ids[pt_id] = pt_id;
355
356         nb_cfg_ports = nb_ports;
357         nb_fwd_ports = nb_ports;
358 }
359
/*
 * Reset the forwarding configuration to its built-in defaults:
 * all enabled lcores (except the master), the default peer MAC
 * addresses, and all probed ports.
 */
void
set_def_fwd_config(void)
{
        set_default_fwd_lcores_config();
        set_def_peer_eth_addrs();
        set_default_fwd_ports_config();
}
367
368 /*
369  * Configuration initialisation done once at init time.
370  */
/* Argument passed to the per-mbuf constructor (testpmd_mbuf_ctor). */
struct mbuf_ctor_arg {
        uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
        uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
};

/* Argument passed to the per-pool constructor (testpmd_mbuf_pool_ctor). */
struct mbuf_pool_ctor_arg {
        uint16_t seg_buf_size; /**< size of data segment in mbuf. */
};
379
380 static void
381 testpmd_mbuf_ctor(struct rte_mempool *mp,
382                   void *opaque_arg,
383                   void *raw_mbuf,
384                   __attribute__((unused)) unsigned i)
385 {
386         struct mbuf_ctor_arg *mb_ctor_arg;
387         struct rte_mbuf    *mb;
388
389         mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
390         mb = (struct rte_mbuf *) raw_mbuf;
391
392         mb->type         = RTE_MBUF_PKT;
393         mb->pool         = mp;
394         mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
395         mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
396                         mb_ctor_arg->seg_buf_offset);
397         mb->buf_len      = mb_ctor_arg->seg_buf_size;
398         mb->type         = RTE_MBUF_PKT;
399         mb->ol_flags     = 0;
400         mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
401         mb->pkt.nb_segs  = 1;
402         mb->pkt.vlan_macip.data = 0;
403         mb->pkt.hash.rss = 0;
404 }
405
/*
 * Per-pool constructor: records the data room size of the pool's mbufs
 * in the pool's private area so that RX paths know how much room each
 * mbuf provides.
 */
static void
testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
                       void *opaque_arg)
{
        struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
        struct rte_pktmbuf_pool_private *mbp_priv;

        /* Sanity check: the pool must reserve enough private space. */
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                printf("%s(%s) private_data_size %d < %d\n",
                       __func__, mp->name, (int) mp->private_data_size,
                       (int) sizeof(struct rte_pktmbuf_pool_private));
                return;
        }
        mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
        mbp_priv = rte_mempool_get_priv(mp);
        mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
}
423
/*
 * Create the mbuf pool used for packet buffers on the given NUMA socket.
 *
 * @param mbuf_seg_size  size of the data segment of each mbuf
 * @param nb_mbuf        number of mbufs to allocate in the pool
 * @param socket_id      NUMA socket the pool memory is taken from
 *
 * Depending on build/runtime configuration, the pool is backed by Xen
 * grant-alloc memory, anonymous mapped memory, or regular memory.
 * Exits the program if the pool cannot be created.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
                 unsigned int socket_id)
{
        char pool_name[RTE_MEMPOOL_NAMESIZE];
        struct rte_mempool *rte_mp;
        struct mbuf_pool_ctor_arg mbp_ctor_arg;
        struct mbuf_ctor_arg mb_ctor_arg;
        uint32_t mb_size;

        /* Each mbuf provides headroom plus the configured segment size. */
        mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
                                                mbuf_seg_size);
        /* Data segment starts right after the cache-aligned mbuf header. */
        mb_ctor_arg.seg_buf_offset =
                (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
        mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
        mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
        mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

#ifdef RTE_LIBRTE_PMD_XENVIRT
        rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
                                   (unsigned) mb_mempool_cache,
                                   sizeof(struct rte_pktmbuf_pool_private),
                                   testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
                                   testpmd_mbuf_ctor, &mb_ctor_arg,
                                   socket_id, 0);



#else
        if (mp_anon != 0)
                rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
                                    (unsigned) mb_mempool_cache,
                                    sizeof(struct rte_pktmbuf_pool_private),
                                    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
                                    testpmd_mbuf_ctor, &mb_ctor_arg,
                                    socket_id, 0);
        else
                rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
                                    (unsigned) mb_mempool_cache,
                                    sizeof(struct rte_pktmbuf_pool_private),
                                    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
                                    testpmd_mbuf_ctor, &mb_ctor_arg,
                                    socket_id, 0);

#endif

        if (rte_mp == NULL) {
                rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
                                                "failed\n", socket_id);
        } else if (verbose_level > 0) {
                rte_mempool_dump(rte_mp);
        }
}
477
/*
 * Configuration initialisation, done once at init time: allocate the
 * per-lcore and per-port data structures, create the mbuf pool(s) - one
 * per NUMA socket with ports when NUMA support is enabled, a single pool
 * otherwise - and build the default forwarding streams.
 * Exits the program on any allocation failure.
 */
static void
init_config(void)
{
        portid_t pid;
        struct rte_port *port;
        struct rte_mempool *mbp;
        unsigned int nb_mbuf_per_pool;
        lcoreid_t  lc_id;
        uint8_t port_per_socket[MAX_SOCKET];

        /* MAX_SOCKET one-byte counters -> size in bytes equals MAX_SOCKET. */
        memset(port_per_socket,0,MAX_SOCKET);
        /* Configuration of logical cores. */
        fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
                                sizeof(struct fwd_lcore *) * nb_lcores,
                                CACHE_LINE_SIZE);
        if (fwd_lcores == NULL) {
                rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
                                                        "failed\n", nb_lcores);
        }
        for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
                fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
                                               sizeof(struct fwd_lcore),
                                               CACHE_LINE_SIZE);
                if (fwd_lcores[lc_id] == NULL) {
                        rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
                                                                "failed\n");
                }
                fwd_lcores[lc_id]->cpuid_idx = lc_id;
        }

        /*
         * Create pools of mbuf.
         * If NUMA support is disabled, create a single pool of mbuf in
         * socket 0 memory by default.
         * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
         *
         * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
         * nb_txd can be configured at run time.
         */
        if (param_total_num_mbufs)
                nb_mbuf_per_pool = param_total_num_mbufs;
        else {
                /* Enough for full RX+TX rings, lcore caches and one burst. */
                nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
                                + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

                if (!numa_support)
                        nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
        }

        if (!numa_support) {
                if (socket_num == UMA_NO_CONFIG)
                        mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
                else
                        mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
                                                 socket_num);
        }
        /*
         * Records which Mbuf pool to use by each logical core, if needed.
         */
        for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
                mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
                /* Fall back to the socket-0 pool when the lcore's socket has none. */
                if (mbp == NULL)
                        mbp = mbuf_pool_find(0);
                fwd_lcores[lc_id]->mbp = mbp;
        }

        /* Configuration of Ethernet ports. */
        ports = rte_zmalloc("testpmd: ports",
                            sizeof(struct rte_port) * nb_ports,
                            CACHE_LINE_SIZE);
        if (ports == NULL) {
                rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
                                                        "failed\n", nb_ports);
        }

        for (pid = 0; pid < nb_ports; pid++) {
                port = &ports[pid];
                rte_eth_dev_info_get(pid, &port->dev_info);

                if (numa_support) {
                        /* Count ports per socket to size the per-socket pools below. */
                        if (port_numa[pid] != NUMA_NO_CONFIG)
                                port_per_socket[port_numa[pid]]++;
                        else {
                                uint32_t socket_id = rte_eth_dev_socket_id(pid);
                                port_per_socket[socket_id]++;
                        }
                }

                /* set flag to initialize port/queue */
                port->need_reconfig = 1;
                port->need_reconfig_queues = 1;
        }

        if (numa_support) {
                uint8_t i;
                unsigned int nb_mbuf;

                /* Split a user-given total budget evenly across ports. */
                if (param_total_num_mbufs)
                        nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;

                for (i = 0; i < MAX_SOCKET; i++) {
                        nb_mbuf = (nb_mbuf_per_pool *
                                                port_per_socket[i]);
                        /* Only create a pool on sockets that actually have ports. */
                        if (nb_mbuf)
                                mbuf_pool_create(mbuf_data_size,
                                                nb_mbuf,i);
                }
        }
        init_port_config();
        /* Configuration of packet forwarding streams. */
        if (init_fwd_streams() < 0)
                rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
591
/*
 * (Re)build the array of forwarding streams: one stream per RX queue of
 * each port. Also validates nb_rxq/nb_txq against each port's limits and
 * assigns each port's socket id (from NUMA topology, or the UMA socket).
 *
 * Returns 0 on success, -1 when a requested queue count exceeds a port's
 * capabilities. Exits the program on allocation failure.
 */
int
init_fwd_streams(void)
{
        portid_t pid;
        struct rte_port *port;
        streamid_t sm_id, nb_fwd_streams_new;

        /* set socket id according to numa or not */
        for (pid = 0; pid < nb_ports; pid++) {
                port = &ports[pid];
                if (nb_rxq > port->dev_info.max_rx_queues) {
                        printf("Fail: nb_rxq(%d) is greater than "
                                "max_rx_queues(%d)\n", nb_rxq,
                                port->dev_info.max_rx_queues);
                        return -1;
                }
                if (nb_txq > port->dev_info.max_tx_queues) {
                        printf("Fail: nb_txq(%d) is greater than "
                                "max_tx_queues(%d)\n", nb_txq,
                                port->dev_info.max_tx_queues);
                        return -1;
                }
                if (numa_support)
                        port->socket_id = rte_eth_dev_socket_id(pid);
                else {
                        if (socket_num == UMA_NO_CONFIG)
                                port->socket_id = 0;
                        else
                                port->socket_id = socket_num;
                }
        }

        nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
        /* Nothing to do when the stream count is unchanged. */
        if (nb_fwd_streams_new == nb_fwd_streams)
                return 0;
        /* clear the old */
        if (fwd_streams != NULL) {
                for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
                        if (fwd_streams[sm_id] == NULL)
                                continue;
                        rte_free(fwd_streams[sm_id]);
                        fwd_streams[sm_id] = NULL;
                }
                rte_free(fwd_streams);
                fwd_streams = NULL;
        }

        /* init new */
        nb_fwd_streams = nb_fwd_streams_new;
        fwd_streams = rte_zmalloc("testpmd: fwd_streams",
                sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
        if (fwd_streams == NULL)
                rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
                                                "failed\n", nb_fwd_streams);

        for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
                fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
                                sizeof(struct fwd_stream), CACHE_LINE_SIZE);
                if (fwd_streams[sm_id] == NULL)
                        rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
                                                                " failed\n");
        }

        return 0;
}
657
658 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
659 static void
660 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
661 {
662         unsigned int total_burst;
663         unsigned int nb_burst;
664         unsigned int burst_stats[3];
665         uint16_t pktnb_stats[3];
666         uint16_t nb_pkt;
667         int burst_percent[3];
668
669         /*
670          * First compute the total number of packet bursts and the
671          * two highest numbers of bursts of the same number of packets.
672          */
673         total_burst = 0;
674         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
675         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
676         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
677                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
678                 if (nb_burst == 0)
679                         continue;
680                 total_burst += nb_burst;
681                 if (nb_burst > burst_stats[0]) {
682                         burst_stats[1] = burst_stats[0];
683                         pktnb_stats[1] = pktnb_stats[0];
684                         burst_stats[0] = nb_burst;
685                         pktnb_stats[0] = nb_pkt;
686                 }
687         }
688         if (total_burst == 0)
689                 return;
690         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
691         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
692                burst_percent[0], (int) pktnb_stats[0]);
693         if (burst_stats[0] == total_burst) {
694                 printf("]\n");
695                 return;
696         }
697         if (burst_stats[0] + burst_stats[1] == total_burst) {
698                 printf(" + %d%% of %d pkts]\n",
699                        100 - burst_percent[0], pktnb_stats[1]);
700                 return;
701         }
702         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
703         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
704         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
705                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
706                 return;
707         }
708         printf(" + %d%% of %d pkts + %d%% of others]\n",
709                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
710 }
711 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
712
/*
 * Display aggregated forwarding statistics for one port: RX/TX packet
 * and drop counters, checksum errors when the csum engine is active,
 * flow director hits/misses and, when queue->stats-register mappings
 * are enabled, per-register queue counters. Two output layouts are
 * used depending on whether stats mappings are enabled.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
        struct rte_port *port;
        uint8_t i;

        static const char *fwd_stats_border = "----------------------";

        port = &ports[port_id];
        printf("\n  %s Forward statistics for port %-2d %s\n",
               fwd_stats_border, port_id, fwd_stats_border);

        /* Compact layout when no queue stats mapping is enabled. */
        if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
                printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
                       "%-"PRIu64"\n",
                       stats->ipackets, stats->ierrors,
                       (uint64_t) (stats->ipackets + stats->ierrors));

                if (cur_fwd_eng == &csum_fwd_engine)
                        printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
                               port->rx_bad_ip_csum, port->rx_bad_l4_csum);

                printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
                       "%-"PRIu64"\n",
                       stats->opackets, port->tx_dropped,
                       (uint64_t) (stats->opackets + port->tx_dropped));

                if (stats->rx_nombuf > 0)
                        printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);

        }
        else {
                /* Wider, right-aligned layout used with queue stats mappings. */
                printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
                       "%14"PRIu64"\n",
                       stats->ipackets, stats->ierrors,
                       (uint64_t) (stats->ipackets + stats->ierrors));

                if (cur_fwd_eng == &csum_fwd_engine)
                        printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
                               port->rx_bad_ip_csum, port->rx_bad_l4_csum);

                printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
                       "%14"PRIu64"\n",
                       stats->opackets, port->tx_dropped,
                       (uint64_t) (stats->opackets + port->tx_dropped));

                if (stats->rx_nombuf > 0)
                        printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
        }
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
        if (port->rx_stream)
                pkt_burst_stats_display("RX",
                        &port->rx_stream->rx_burst_stats);
        if (port->tx_stream)
                pkt_burst_stats_display("TX",
                        &port->tx_stream->tx_burst_stats);
#endif
        /* stats fdir */
        if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
                printf("  Fdirmiss:%14"PRIu64"    Fdirmatch:%14"PRIu64"\n",
                       stats->fdirmiss,
                       stats->fdirmatch);

        if (port->rx_queue_stats_mapping_enabled) {
                printf("\n");
                for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
                        printf("  Stats reg %2d RX-packets:%14"PRIu64
                               "     RX-errors:%14"PRIu64
                               "    RX-bytes:%14"PRIu64"\n",
                               i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
                }
                printf("\n");
        }
        if (port->tx_queue_stats_mapping_enabled) {
                for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
                        printf("  Stats reg %2d TX-packets:%14"PRIu64
                               "                                 TX-bytes:%14"PRIu64"\n",
                               i, stats->q_opackets[i], stats->q_obytes[i]);
                }
        }

        printf("  %s--------------------------------%s\n",
               fwd_stats_border, fwd_stats_border);
}
797
/*
 * Display per-stream forwarding statistics: RX/TX packet counts, drops,
 * checksum errors when the csum engine is active and, optionally, the
 * burst spread statistics. Streams that saw no traffic are skipped.
 */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
        struct fwd_stream *fs;
        static const char *fwd_top_stats_border = "-------";

        fs = fwd_streams[stream_id];
        /* Nothing to report for an idle stream. */
        if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
            (fs->fwd_dropped == 0))
                return;
        printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
               "TX Port=%2d/Queue=%2d %s\n",
               fwd_top_stats_border, fs->rx_port, fs->rx_queue,
               fs->tx_port, fs->tx_queue, fwd_top_stats_border);
        printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
               fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

        /* if checksum mode */
        if (cur_fwd_eng == &csum_fwd_engine) {
               printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
                        "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
        }

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
        pkt_burst_stats_display("RX", &fs->rx_burst_stats);
        pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
826
827 static void
828 flush_fwd_rx_queues(void)
829 {
830         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
831         portid_t  rxp;
832         portid_t port_id;
833         queueid_t rxq;
834         uint16_t  nb_rx;
835         uint16_t  i;
836         uint8_t   j;
837
838         for (j = 0; j < 2; j++) {
839                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
840                         for (rxq = 0; rxq < nb_rxq; rxq++) {
841                                 port_id = fwd_ports_ids[rxp];
842                                 do {
843                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
844                                                 pkts_burst, MAX_PKT_BURST);
845                                         for (i = 0; i < nb_rx; i++)
846                                                 rte_pktmbuf_free(pkts_burst[i]);
847                                 } while (nb_rx > 0);
848                         }
849                 }
850                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
851         }
852 }
853
854 static void
855 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
856 {
857         struct fwd_stream **fsm;
858         streamid_t nb_fs;
859         streamid_t sm_id;
860
861         fsm = &fwd_streams[fc->stream_idx];
862         nb_fs = fc->stream_nb;
863         do {
864                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
865                         (*pkt_fwd)(fsm[sm_id]);
866         } while (! fc->stopped);
867 }
868
869 static int
870 start_pkt_forward_on_core(void *fwd_arg)
871 {
872         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
873                              cur_fwd_config.fwd_eng->packet_fwd);
874         return 0;
875 }
876
877 /*
878  * Run the TXONLY packet forwarding engine to send a single burst of packets.
879  * Used to start communication flows in network loopback test configurations.
880  */
881 static int
882 run_one_txonly_burst_on_core(void *fwd_arg)
883 {
884         struct fwd_lcore *fwd_lc;
885         struct fwd_lcore tmp_lcore;
886
887         fwd_lc = (struct fwd_lcore *) fwd_arg;
888         tmp_lcore = *fwd_lc;
889         tmp_lcore.stopped = 1;
890         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
891         return 0;
892 }
893
894 /*
895  * Launch packet forwarding:
896  *     - Setup per-port forwarding context.
897  *     - launch logical cores with their forwarding configuration.
898  */
899 static void
900 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
901 {
902         port_fwd_begin_t port_fwd_begin;
903         unsigned int i;
904         unsigned int lc_id;
905         int diag;
906
907         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
908         if (port_fwd_begin != NULL) {
909                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
910                         (*port_fwd_begin)(fwd_ports_ids[i]);
911         }
912         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
913                 lc_id = fwd_lcores_cpuids[i];
914                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
915                         fwd_lcores[i]->stopped = 0;
916                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
917                                                      fwd_lcores[i], lc_id);
918                         if (diag != 0)
919                                 printf("launch lcore %u failed - diag=%d\n",
920                                        lc_id, diag);
921                 }
922         }
923 }
924
/*
 * Launch packet forwarding configuration.
 *
 * Validates that all ports are started and no forwarding run is in
 * progress, optionally flushes RX queues and sends an initial TX-only
 * burst (with_tx_first != 0), snapshots port statistics as a baseline,
 * clears per-stream counters, then launches the configured forwarding
 * engine on every forwarding lcore.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done == 0 means a forwarding run is already active. */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	/* In DCB mode, every forwarding port must be DCB-configured and
	 * more than one forwarding core is required. */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	/* Start from empty RX queues unless the user disabled flushing. */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	rxtx_config_display();

	/* Snapshot current HW stats so the run reports deltas only. */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Reset all per-stream software counters. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	/* Optionally send one TX-only burst first to prime loopback flows,
	 * waiting for its completion before real forwarding begins. */
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1011
1012 void
1013 stop_packet_forwarding(void)
1014 {
1015         struct rte_eth_stats stats;
1016         struct rte_port *port;
1017         port_fwd_end_t  port_fwd_end;
1018         int i;
1019         portid_t   pt_id;
1020         streamid_t sm_id;
1021         lcoreid_t  lc_id;
1022         uint64_t total_recv;
1023         uint64_t total_xmit;
1024         uint64_t total_rx_dropped;
1025         uint64_t total_tx_dropped;
1026         uint64_t total_rx_nombuf;
1027         uint64_t tx_dropped;
1028         uint64_t rx_bad_ip_csum;
1029         uint64_t rx_bad_l4_csum;
1030 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1031         uint64_t fwd_cycles;
1032 #endif
1033         static const char *acc_stats_border = "+++++++++++++++";
1034
1035         if (all_ports_started() == 0) {
1036                 printf("Not all ports were started\n");
1037                 return;
1038         }
1039         if (test_done) {
1040                 printf("Packet forwarding not started\n");
1041                 return;
1042         }
1043         printf("Telling cores to stop...");
1044         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1045                 fwd_lcores[lc_id]->stopped = 1;
1046         printf("\nWaiting for lcores to finish...\n");
1047         rte_eal_mp_wait_lcore();
1048         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1049         if (port_fwd_end != NULL) {
1050                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1051                         pt_id = fwd_ports_ids[i];
1052                         (*port_fwd_end)(pt_id);
1053                 }
1054         }
1055 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1056         fwd_cycles = 0;
1057 #endif
1058         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1059                 if (cur_fwd_config.nb_fwd_streams >
1060                     cur_fwd_config.nb_fwd_ports) {
1061                         fwd_stream_stats_display(sm_id);
1062                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1063                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1064                 } else {
1065                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1066                                 fwd_streams[sm_id];
1067                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1068                                 fwd_streams[sm_id];
1069                 }
1070                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1071                 tx_dropped = (uint64_t) (tx_dropped +
1072                                          fwd_streams[sm_id]->fwd_dropped);
1073                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1074
1075                 rx_bad_ip_csum =
1076                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1077                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1078                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1079                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1080                                                         rx_bad_ip_csum;
1081
1082                 rx_bad_l4_csum =
1083                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1084                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1085                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1086                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1087                                                         rx_bad_l4_csum;
1088
1089 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1090                 fwd_cycles = (uint64_t) (fwd_cycles +
1091                                          fwd_streams[sm_id]->core_cycles);
1092 #endif
1093         }
1094         total_recv = 0;
1095         total_xmit = 0;
1096         total_rx_dropped = 0;
1097         total_tx_dropped = 0;
1098         total_rx_nombuf  = 0;
1099         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1100                 pt_id = fwd_ports_ids[i];
1101
1102                 port = &ports[pt_id];
1103                 rte_eth_stats_get(pt_id, &stats);
1104                 stats.ipackets -= port->stats.ipackets;
1105                 port->stats.ipackets = 0;
1106                 stats.opackets -= port->stats.opackets;
1107                 port->stats.opackets = 0;
1108                 stats.ibytes   -= port->stats.ibytes;
1109                 port->stats.ibytes = 0;
1110                 stats.obytes   -= port->stats.obytes;
1111                 port->stats.obytes = 0;
1112                 stats.ierrors  -= port->stats.ierrors;
1113                 port->stats.ierrors = 0;
1114                 stats.oerrors  -= port->stats.oerrors;
1115                 port->stats.oerrors = 0;
1116                 stats.rx_nombuf -= port->stats.rx_nombuf;
1117                 port->stats.rx_nombuf = 0;
1118                 stats.fdirmatch -= port->stats.fdirmatch;
1119                 port->stats.rx_nombuf = 0;
1120                 stats.fdirmiss -= port->stats.fdirmiss;
1121                 port->stats.rx_nombuf = 0;
1122
1123                 total_recv += stats.ipackets;
1124                 total_xmit += stats.opackets;
1125                 total_rx_dropped += stats.ierrors;
1126                 total_tx_dropped += port->tx_dropped;
1127                 total_rx_nombuf  += stats.rx_nombuf;
1128
1129                 fwd_port_stats_display(pt_id, &stats);
1130         }
1131         printf("\n  %s Accumulated forward statistics for all ports"
1132                "%s\n",
1133                acc_stats_border, acc_stats_border);
1134         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1135                "%-"PRIu64"\n"
1136                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1137                "%-"PRIu64"\n",
1138                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1139                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1140         if (total_rx_nombuf > 0)
1141                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1142         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1143                "%s\n",
1144                acc_stats_border, acc_stats_border);
1145 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1146         if (total_recv > 0)
1147                 printf("\n  CPU cycles/packet=%u (total cycles="
1148                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1149                        (unsigned int)(fwd_cycles / total_recv),
1150                        fwd_cycles, total_recv);
1151 #endif
1152         printf("\nDone.\n");
1153         test_done = 1;
1154 }
1155
1156 static int
1157 all_ports_started(void)
1158 {
1159         portid_t pi;
1160         struct rte_port *port;
1161
1162         for (pi = 0; pi < nb_ports; pi++) {
1163                 port = &ports[pi];
1164                 /* Check if there is a port which is not started */
1165                 if (port->port_status != RTE_PORT_STARTED)
1166                         return 0;
1167         }
1168
1169         /* No port is not started */
1170         return 1;
1171 }
1172
/*
 * Configure (if flagged), set up the RX/TX queues of, and start the
 * port(s) selected by "pid" (pid == nb_ports selects every port).
 * Port state transitions go STOPPED -> HANDLING -> STARTED via atomic
 * compare-and-set so concurrent commands cannot race on a port.
 * Returns 0 on success, -1 on a configuration failure (the failing
 * step's need_reconfig* flag is re-armed so it is retried next time).
 */
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = 0;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return -1;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return -1;
	}

	if(dcb_config)
		dcb_test = 1;
	for (pi = 0; pi < nb_ports; pi++) {
		/* pid == nb_ports means "all ports"; otherwise one port. */
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		/* Claim the port: only a STOPPED port can be started. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %d)\n", pi,
					rte_eth_dev_socket_id(pi));
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				/* Release the port back to STOPPED. */
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/* Honour a per-port TX ring NUMA override. */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					/* Mbufs must come from the pool on
					 * the RX ring's own NUMA socket. */
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							"on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,rxring_numa[pi],
					     &(port->rx_conf),mp);
				}
				else
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,port->socket_id,
					     &(port->rx_conf),
					     mbuf_pool_find(port->socket_id));

				if (diag == 0)
					continue;


				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
	else
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
1319
1320 void
1321 stop_port(portid_t pid)
1322 {
1323         portid_t pi;
1324         struct rte_port *port;
1325         int need_check_link_status = 0;
1326
1327         if (test_done == 0) {
1328                 printf("Please stop forwarding first\n");
1329                 return;
1330         }
1331         if (dcb_test) {
1332                 dcb_test = 0;
1333                 dcb_config = 0;
1334         }
1335         printf("Stopping ports...\n");
1336
1337         for (pi = 0; pi < nb_ports; pi++) {
1338                 if (pid < nb_ports && pid != pi)
1339                         continue;
1340
1341                 port = &ports[pi];
1342                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1343                                                 RTE_PORT_HANDLING) == 0)
1344                         continue;
1345
1346                 rte_eth_dev_stop(pi);
1347
1348                 if (rte_atomic16_cmpset(&(port->port_status),
1349                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1350                         printf("Port %d can not be set into stopped\n", pi);
1351                 need_check_link_status = 1;
1352         }
1353         if (need_check_link_status)
1354                 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1355
1356         printf("Done\n");
1357 }
1358
1359 void
1360 close_port(portid_t pid)
1361 {
1362         portid_t pi;
1363         struct rte_port *port;
1364
1365         if (test_done == 0) {
1366                 printf("Please stop forwarding first\n");
1367                 return;
1368         }
1369
1370         printf("Closing ports...\n");
1371
1372         for (pi = 0; pi < nb_ports; pi++) {
1373                 if (pid < nb_ports && pid != pi)
1374                         continue;
1375
1376                 port = &ports[pi];
1377                 if (rte_atomic16_cmpset(&(port->port_status),
1378                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1379                         printf("Port %d is now not stopped\n", pi);
1380                         continue;
1381                 }
1382
1383                 rte_eth_dev_close(pi);
1384
1385                 if (rte_atomic16_cmpset(&(port->port_status),
1386                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1387                         printf("Port %d can not be set into stopped\n", pi);
1388         }
1389
1390         printf("Done\n");
1391 }
1392
1393 int
1394 all_ports_stopped(void)
1395 {
1396         portid_t pi;
1397         struct rte_port *port;
1398
1399         for (pi = 0; pi < nb_ports; pi++) {
1400                 port = &ports[pi];
1401                 if (port->port_status != RTE_PORT_STOPPED)
1402                         return 0;
1403         }
1404
1405         return 1;
1406 }
1407
1408 void
1409 pmd_test_exit(void)
1410 {
1411         portid_t pt_id;
1412
1413         for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1414                 printf("Stopping port %d...", pt_id);
1415                 fflush(stdout);
1416                 rte_eth_dev_close(pt_id);
1417                 printf("done\n");
1418         }
1419         printf("bye...\n");
1420 }
1421
/* Handler type for a parameter-less test menu command. */
typedef void (*cmd_func_t)(void);
/* One entry of the test command table: name -> handler. */
struct pmd_test_command {
	const char *cmd_name;	/* command name typed by the user */
	cmd_func_t cmd_func;	/* function invoked for that command */
};

/* Number of entries in the pmd_test_menu[] command table. */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1429
1430 /* Check the link status of all ports in up to 9s, and print them finally */
1431 static void
1432 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1433 {
1434 #define CHECK_INTERVAL 100 /* 100ms */
1435 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1436         uint8_t portid, count, all_ports_up, print_flag = 0;
1437         struct rte_eth_link link;
1438
1439         printf("Checking link statuses...\n");
1440         fflush(stdout);
1441         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1442                 all_ports_up = 1;
1443                 for (portid = 0; portid < port_num; portid++) {
1444                         if ((port_mask & (1 << portid)) == 0)
1445                                 continue;
1446                         memset(&link, 0, sizeof(link));
1447                         rte_eth_link_get_nowait(portid, &link);
1448                         /* print link status if flag set */
1449                         if (print_flag == 1) {
1450                                 if (link.link_status)
1451                                         printf("Port %d Link Up - speed %u "
1452                                                 "Mbps - %s\n", (uint8_t)portid,
1453                                                 (unsigned)link.link_speed,
1454                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1455                                         ("full-duplex") : ("half-duplex\n"));
1456                                 else
1457                                         printf("Port %d Link Down\n",
1458                                                 (uint8_t)portid);
1459                                 continue;
1460                         }
1461                         /* clear all_ports_up flag if any link down */
1462                         if (link.link_status == 0) {
1463                                 all_ports_up = 0;
1464                                 break;
1465                         }
1466                 }
1467                 /* after finally printing all link status, get out */
1468                 if (print_flag == 1)
1469                         break;
1470
1471                 if (all_ports_up == 0) {
1472                         fflush(stdout);
1473                         rte_delay_ms(CHECK_INTERVAL);
1474                 }
1475
1476                 /* set the print_flag if all ports up or timeout */
1477                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1478                         print_flag = 1;
1479                 }
1480         }
1481 }
1482
1483 static int
1484 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1485 {
1486         uint16_t i;
1487         int diag;
1488         uint8_t mapping_found = 0;
1489
1490         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1491                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1492                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1493                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1494                                         tx_queue_stats_mappings[i].queue_id,
1495                                         tx_queue_stats_mappings[i].stats_counter_id);
1496                         if (diag != 0)
1497                                 return diag;
1498                         mapping_found = 1;
1499                 }
1500         }
1501         if (mapping_found)
1502                 port->tx_queue_stats_mapping_enabled = 1;
1503         return 0;
1504 }
1505
1506 static int
1507 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1508 {
1509         uint16_t i;
1510         int diag;
1511         uint8_t mapping_found = 0;
1512
1513         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1514                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1515                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1516                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1517                                         rx_queue_stats_mappings[i].queue_id,
1518                                         rx_queue_stats_mappings[i].stats_counter_id);
1519                         if (diag != 0)
1520                                 return diag;
1521                         mapping_found = 1;
1522                 }
1523         }
1524         if (mapping_found)
1525                 port->rx_queue_stats_mapping_enabled = 1;
1526         return 0;
1527 }
1528
1529 static void
1530 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1531 {
1532         int diag = 0;
1533
1534         diag = set_tx_queue_stats_mapping_registers(pi, port);
1535         if (diag != 0) {
1536                 if (diag == -ENOTSUP) {
1537                         port->tx_queue_stats_mapping_enabled = 0;
1538                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1539                 }
1540                 else
1541                         rte_exit(EXIT_FAILURE,
1542                                         "set_tx_queue_stats_mapping_registers "
1543                                         "failed for port id=%d diag=%d\n",
1544                                         pi, diag);
1545         }
1546
1547         diag = set_rx_queue_stats_mapping_registers(pi, port);
1548         if (diag != 0) {
1549                 if (diag == -ENOTSUP) {
1550                         port->rx_queue_stats_mapping_enabled = 0;
1551                         printf("RX queue stats mapping not supported port id=%d\n", pi);
1552                 }
1553                 else
1554                         rte_exit(EXIT_FAILURE,
1555                                         "set_rx_queue_stats_mapping_registers "
1556                                         "failed for port id=%d diag=%d\n",
1557                                         pi, diag);
1558         }
1559 }
1560
1561 void
1562 init_port_config(void)
1563 {
1564         portid_t pid;
1565         struct rte_port *port;
1566
1567         for (pid = 0; pid < nb_ports; pid++) {
1568                 port = &ports[pid];
1569                 port->dev_conf.rxmode = rx_mode;
1570                 port->dev_conf.fdir_conf = fdir_conf;
1571                 if (nb_rxq > 1) {
1572                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1573                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1574                 } else {
1575                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1576                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1577                 }
1578
1579                 /* In SR-IOV mode, RSS mode is not available */
1580                 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1581                         if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1582                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1583                         else
1584                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;        
1585                 }
1586
1587                 port->rx_conf.rx_thresh = rx_thresh;
1588                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1589                 port->rx_conf.rx_drop_en = rx_drop_en;
1590                 port->tx_conf.tx_thresh = tx_thresh;
1591                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1592                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1593                 port->tx_conf.txq_flags = txq_flags;
1594
1595                 rte_eth_macaddr_get(pid, &port->eth_addr);
1596
1597                 map_port_queue_stats_mapping_registers(pid, port);
1598 #ifdef RTE_NIC_BYPASS
1599                 rte_eth_dev_bypass_init(pid);
1600 #endif
1601         }
1602 }
1603
/*
 * VLAN IDs used below when building DCB/VMDQ configurations: they are
 * spread over the VMDQ pool map in get_eth_dcb_conf() and programmed
 * into the port VLAN filter in init_port_dcb_config().
 */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
1610
1611 static  int
1612 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1613 {
1614         uint8_t i;
1615  
1616         /*
1617          * Builds up the correct configuration for dcb+vt based on the vlan tags array
1618          * given above, and the number of traffic classes available for use.
1619          */
1620         if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1621                 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1622                 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1623  
1624                 /* VMDQ+DCB RX and TX configrations */
1625                 vmdq_rx_conf.enable_default_pool = 0;
1626                 vmdq_rx_conf.default_pool = 0;
1627                 vmdq_rx_conf.nb_queue_pools =
1628                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1629                 vmdq_tx_conf.nb_queue_pools =
1630                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1631  
1632                 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1633                 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1634                         vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1635                         vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1636                 }
1637                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1638                         vmdq_rx_conf.dcb_queue[i] = i;
1639                         vmdq_tx_conf.dcb_queue[i] = i;
1640                 }
1641  
1642                 /*set DCB mode of RX and TX of multiple queues*/
1643                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1644                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1645                 if (dcb_conf->pfc_en)
1646                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1647                 else
1648                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1649  
1650                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1651                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1652                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1653                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1654         }
1655         else {
1656                 struct rte_eth_dcb_rx_conf rx_conf;
1657                 struct rte_eth_dcb_tx_conf tx_conf;
1658  
1659                 /* queue mapping configuration of DCB RX and TX */
1660                 if (dcb_conf->num_tcs == ETH_4_TCS)
1661                         dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1662                 else
1663                         dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1664  
1665                 rx_conf.nb_tcs = dcb_conf->num_tcs;
1666                 tx_conf.nb_tcs = dcb_conf->num_tcs;
1667  
1668                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1669                         rx_conf.dcb_queue[i] = i;
1670                         tx_conf.dcb_queue[i] = i;
1671                 }
1672                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1673                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1674                 if (dcb_conf->pfc_en)
1675                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1676                 else
1677                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1678                  
1679                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1680                                 sizeof(struct rte_eth_dcb_rx_conf)));
1681                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1682                                 sizeof(struct rte_eth_dcb_tx_conf)));
1683         }
1684
1685         return 0;
1686 }
1687
1688 int
1689 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1690 {
1691         struct rte_eth_conf port_conf;
1692         struct rte_port *rte_port;
1693         int retval;
1694         uint16_t nb_vlan;
1695         uint16_t i;
1696  
1697         /* rxq and txq configuration in dcb mode */
1698         nb_rxq = 128;
1699         nb_txq = 128;
1700         rx_free_thresh = 64;
1701  
1702         memset(&port_conf,0,sizeof(struct rte_eth_conf));
1703         /* Enter DCB configuration status */
1704         dcb_config = 1;
1705  
1706         nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1707         /*set configuration of DCB in vt mode and DCB in non-vt mode*/
1708         retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1709         if (retval < 0)
1710                 return retval;
1711  
1712         rte_port = &ports[pid];
1713         memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1714  
1715         rte_port->rx_conf.rx_thresh = rx_thresh;
1716         rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1717         rte_port->tx_conf.tx_thresh = tx_thresh;
1718         rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1719         rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1720         /* VLAN filter */
1721         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1722         for (i = 0; i < nb_vlan; i++){
1723                 rx_vft_set(pid, vlan_tags[i], 1);
1724         }
1725  
1726         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1727         map_port_queue_stats_mapping_registers(pid, rte_port);
1728
1729         rte_port->dcb_flag = 1;
1730  
1731         return 0;
1732 }
1733
#ifdef RTE_EXEC_ENV_BAREMETAL
#define main _main
#endif

/*
 * testpmd entry point: initialize EAL, PMDs and PCI devices, parse the
 * application arguments, configure and start all ports, then either run
 * the interactive prompt or start forwarding until the user hits enter.
 */
int
main(int argc, char** argv)
{
	int  diag;
	uint8_t port_id;

	/* EAL consumes its own arguments first and returns how many it
	 * used; that count is reused below to skip past them. */
	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	if (rte_pmd_init_all())
		rte_panic("Cannot init PMD\n");

	if (rte_eal_pci_probe())
		rte_panic("Cannot probe PCI\n");

	/* At least one ethernet port must have been probed. */
	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
							"check that "
			  "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
			  "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
			  "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
			  "configuration file\n");

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Skip the EAL arguments and parse the testpmd-specific ones. */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	for (port_id = 0; port_id < nb_ports; port_id++)
		rte_eth_promiscuous_enable(port_id);

	if (interactive == 1)
		prompt();
	else {
		char c;
		int rc;

		/* Non-interactive mode: forward until the user presses
		 * enter (or stdin read fails). */
		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		if (rc < 0)
			return 1;
	}

	return 0;
}