app/testpmd: add macswap forwarding engine
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  * 
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  * 
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  * 
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  * 
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81
uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might be not physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;        /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines. NULL-terminated table of all engines selectable
 * with "set fwd <mode>".
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_retry_fwd_engine,
	&mac_swap_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
				      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */

/* current configuration is in DCB or not, 0 means it is not in DCB mode */
uint8_t dcb_config = 0;

/* Whether the dcb is in testing status */
uint8_t dcb_test = 0;

/* DCB on and VT on mapping is default */
enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

/*
 * Configurable values of RX and TX ring threshold registers.
 */
#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
#define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
#define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */

#define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
#define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
#define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */

struct rte_eth_thresh rx_thresh = {
	.pthresh = RX_PTHRESH,
	.hthresh = RX_HTHRESH,
	.wthresh = RX_WTHRESH,
};

struct rte_eth_thresh tx_thresh = {
	.pthresh = TX_PTHRESH,
	.hthresh = TX_HTHRESH,
	.wthresh = TX_WTHRESH,
};

/*
 * Configurable value of RX free threshold.
 */
uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */

/*
 * Configurable value of RX drop enable.
 */
uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */

/*
 * Configurable value of TX free threshold.
 */
uint16_t tx_free_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX RS bit threshold.
 */
uint16_t tx_rs_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX queue flags.
 */
uint32_t txq_flags = 0; /* No flags set. */

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoids to flush all the RX streams before starts forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.flexbytes_offset = 0x6,
	.drop_queue = 127,
};

static volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
316
317 /*
318  * Setup default configuration.
319  */
320 static void
321 set_default_fwd_lcores_config(void)
322 {
323         unsigned int i;
324         unsigned int nb_lc;
325
326         nb_lc = 0;
327         for (i = 0; i < RTE_MAX_LCORE; i++) {
328                 if (! rte_lcore_is_enabled(i))
329                         continue;
330                 if (i == rte_get_master_lcore())
331                         continue;
332                 fwd_lcores_cpuids[nb_lc++] = i;
333         }
334         nb_lcores = (lcoreid_t) nb_lc;
335         nb_cfg_lcores = nb_lcores;
336         nb_fwd_lcores = 1;
337 }
338
339 static void
340 set_def_peer_eth_addrs(void)
341 {
342         portid_t i;
343
344         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
345                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
346                 peer_eth_addrs[i].addr_bytes[5] = i;
347         }
348 }
349
350 static void
351 set_default_fwd_ports_config(void)
352 {
353         portid_t pt_id;
354
355         for (pt_id = 0; pt_id < nb_ports; pt_id++)
356                 fwd_ports_ids[pt_id] = pt_id;
357
358         nb_cfg_ports = nb_ports;
359         nb_fwd_ports = nb_ports;
360 }
361
/*
 * Reset the whole forwarding configuration (lcores, peer addresses and
 * ports) to its built-in defaults.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
369
/*
 * Configuration initialisation done once at init time.
 */
/* Argument passed to the per-mbuf constructor (testpmd_mbuf_ctor). */
struct mbuf_ctor_arg {
	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
};

/* Argument passed to the pool constructor (testpmd_mbuf_pool_ctor). */
struct mbuf_pool_ctor_arg {
	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
};
381
382 static void
383 testpmd_mbuf_ctor(struct rte_mempool *mp,
384                   void *opaque_arg,
385                   void *raw_mbuf,
386                   __attribute__((unused)) unsigned i)
387 {
388         struct mbuf_ctor_arg *mb_ctor_arg;
389         struct rte_mbuf    *mb;
390
391         mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
392         mb = (struct rte_mbuf *) raw_mbuf;
393
394         mb->type         = RTE_MBUF_PKT;
395         mb->pool         = mp;
396         mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
397         mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
398                         mb_ctor_arg->seg_buf_offset);
399         mb->buf_len      = mb_ctor_arg->seg_buf_size;
400         mb->type         = RTE_MBUF_PKT;
401         mb->ol_flags     = 0;
402         mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
403         mb->pkt.nb_segs  = 1;
404         mb->pkt.vlan_macip.data = 0;
405         mb->pkt.hash.rss = 0;
406 }
407
408 static void
409 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
410                        void *opaque_arg)
411 {
412         struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
413         struct rte_pktmbuf_pool_private *mbp_priv;
414
415         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
416                 printf("%s(%s) private_data_size %d < %d\n",
417                        __func__, mp->name, (int) mp->private_data_size,
418                        (int) sizeof(struct rte_pktmbuf_pool_private));
419                 return;
420         }
421         mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
422         mbp_priv = rte_mempool_get_priv(mp);
423         mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
424 }
425
/*
 * Create an mbuf pool on the given socket: compute the per-mbuf layout
 * (headroom + data segment placed after the cache-line rounded rte_mbuf
 * header) and allocate the pool through the backend selected at build
 * time / run time (Xen gntalloc, anonymous mapping, or the regular
 * rte_mempool allocator). Exits the application on allocation failure.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp;
	struct mbuf_pool_ctor_arg mbp_ctor_arg;
	struct mbuf_ctor_arg mb_ctor_arg;
	uint32_t mb_size;

	/* Data room = headroom + requested data segment size. */
	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
						mbuf_seg_size);
	/* Data segment follows the mbuf header, cache-line aligned. */
	mb_ctor_arg.seg_buf_offset =
		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

#ifdef RTE_LIBRTE_PMD_XENVIRT
	/* Xen build: allocate the pool from grant pages. */
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
				   (unsigned) mb_mempool_cache,
				   sizeof(struct rte_pktmbuf_pool_private),
				   testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				   testpmd_mbuf_ctor, &mb_ctor_arg,
				   socket_id, 0);
#else
	/* --mp-anon: back the pool with anonymous mapped memory. */
	if (mp_anon != 0)
		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);
	else
		rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);

#endif

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
						"failed\n", socket_id);
	} else if (verbose_level > 0) {
		rte_mempool_dump(rte_mp);
	}
}
479
480 /*
481  * Check given socket id is valid or not with NUMA mode,
482  * if valid, return 0, else return -1
483  */
484 static int
485 check_socket_id(const unsigned int socket_id)
486 {
487         static int warning_once = 0;
488
489         if (socket_id >= MAX_SOCKET) {
490                 if (!warning_once && numa_support)
491                         printf("Warning: NUMA should be configured manually by"
492                                " using --port-numa-config and"
493                                " --ring-numa-config parameters along with"
494                                " --numa.\n");
495                 warning_once = 1;
496                 return -1;
497         }
498         return 0;
499 }
500
/*
 * One-time initialisation of the global testpmd configuration:
 * per-lcore forwarding contexts, mbuf pools (one per socket in NUMA
 * mode, a single pool otherwise), per-port data and the forwarding
 * streams. Exits the application on any allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[MAX_SOCKET];

	memset(port_per_socket,0,MAX_SOCKET);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/*
		 * Worst-case sizing: full RX and TX rings plus the
		 * per-lcore mempool caches and one extra burst.
		 */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		/* Single-pool (UMA) case: scale by the number of ports. */
		if (!numa_support)
			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * nb_ports,
			    CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
							"failed\n", nb_ports);
	}

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			/* Count ports per socket to size the per-socket pools. */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		/* A user-given total is split evenly across the ports. */
		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;

		/* One pool per socket that actually hosts ports. */
		for (i = 0; i < MAX_SOCKET; i++) {
			nb_mbuf = (nb_mbuf_per_pool *
						port_per_socket[i]);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						nb_mbuf,i);
		}
	}
	init_port_config();

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
		/* Fall back to the socket-0 pool when no local pool exists. */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
620
/*
 * (Re)allocate the forwarding streams: one stream per RX queue of each
 * port (nb_ports * nb_rxq). Also assigns each port its NUMA socket id.
 * Returns 0 on success, -1 when the requested queue counts exceed a
 * port's capabilities; exits the application on allocation failure.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;

	/* set socket id according to numa or not */
	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			/* Explicit --port-numa-config wins over the device's socket. */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
	/* Nothing to do when the stream count is unchanged. */
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
695
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Display the recorded burst-size distribution for one direction
 * (rx_tx is "RX" or "TX"): the total number of bursts and the share of
 * the two most frequent burst sizes.
 *
 * Fix: the scan now also tracks the second-highest count. The original
 * loop only updated the slots when a new maximum was found, so a count
 * that beat only burst_stats[1] was ignored and the reported "second
 * highest" burst size could be wrong/stale.
 */
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			/* New maximum: old maximum becomes second best. */
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			/* Beats only the second best: update slot 1. */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
750
/*
 * Display the forwarding statistics accumulated for one port: RX/TX
 * packet counters (plus bad-checksum counters in csum mode), XON/XOFF
 * pause frames, optional per-burst spread, flow-director match counters
 * and per-queue statistics when queue-stats mapping is enabled.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	/* Two layouts: compact without queue-stats mapping, wide with it. */
	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);

	}
	else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
	}

	/* Display statistics of XON/XOFF pause frames, if any. */
	if ((stats->tx_pause_xon  | stats->rx_pause_xon |
	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
		printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
		       stats->rx_pause_xoff, stats->rx_pause_xon);
		printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
		       stats->tx_pause_xoff, stats->tx_pause_xon);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif
	/* stats fdir */
	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf("  Fdirmiss:%14"PRIu64"    Fdirmatch:%14"PRIu64"\n",
		       stats->fdirmiss,
		       stats->fdirmatch);

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
845
846 static void
847 fwd_stream_stats_display(streamid_t stream_id)
848 {
849         struct fwd_stream *fs;
850         static const char *fwd_top_stats_border = "-------";
851
852         fs = fwd_streams[stream_id];
853         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
854             (fs->fwd_dropped == 0))
855                 return;
856         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
857                "TX Port=%2d/Queue=%2d %s\n",
858                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
859                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
860         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
861                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
862
863         /* if checksum mode */
864         if (cur_fwd_eng == &csum_fwd_engine) {
865                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
866                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
867         }
868
869 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
870         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
871         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
872 #endif
873 }
874
875 static void
876 flush_fwd_rx_queues(void)
877 {
878         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
879         portid_t  rxp;
880         portid_t port_id;
881         queueid_t rxq;
882         uint16_t  nb_rx;
883         uint16_t  i;
884         uint8_t   j;
885
886         for (j = 0; j < 2; j++) {
887                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
888                         for (rxq = 0; rxq < nb_rxq; rxq++) {
889                                 port_id = fwd_ports_ids[rxp];
890                                 do {
891                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
892                                                 pkts_burst, MAX_PKT_BURST);
893                                         for (i = 0; i < nb_rx; i++)
894                                                 rte_pktmbuf_free(pkts_burst[i]);
895                                 } while (nb_rx > 0);
896                         }
897                 }
898                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
899         }
900 }
901
902 static void
903 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
904 {
905         struct fwd_stream **fsm;
906         streamid_t nb_fs;
907         streamid_t sm_id;
908
909         fsm = &fwd_streams[fc->stream_idx];
910         nb_fs = fc->stream_nb;
911         do {
912                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
913                         (*pkt_fwd)(fsm[sm_id]);
914         } while (! fc->stopped);
915 }
916
917 static int
918 start_pkt_forward_on_core(void *fwd_arg)
919 {
920         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
921                              cur_fwd_config.fwd_eng->packet_fwd);
922         return 0;
923 }
924
925 /*
926  * Run the TXONLY packet forwarding engine to send a single burst of packets.
927  * Used to start communication flows in network loopback test configurations.
928  */
929 static int
930 run_one_txonly_burst_on_core(void *fwd_arg)
931 {
932         struct fwd_lcore *fwd_lc;
933         struct fwd_lcore tmp_lcore;
934
935         fwd_lc = (struct fwd_lcore *) fwd_arg;
936         tmp_lcore = *fwd_lc;
937         tmp_lcore.stopped = 1;
938         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
939         return 0;
940 }
941
942 /*
943  * Launch packet forwarding:
944  *     - Setup per-port forwarding context.
945  *     - launch logical cores with their forwarding configuration.
946  */
947 static void
948 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
949 {
950         port_fwd_begin_t port_fwd_begin;
951         unsigned int i;
952         unsigned int lc_id;
953         int diag;
954
955         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
956         if (port_fwd_begin != NULL) {
957                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
958                         (*port_fwd_begin)(fwd_ports_ids[i]);
959         }
960         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
961                 lc_id = fwd_lcores_cpuids[i];
962                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
963                         fwd_lcores[i]->stopped = 0;
964                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
965                                                      fwd_lcores[i], lc_id);
966                         if (diag != 0)
967                                 printf("launch lcore %u failed - diag=%d\n",
968                                        lc_id, diag);
969                 }
970         }
971 }
972
/*
 * Launch packet forwarding configuration.
 *
 * Verifies that all ports are started (and, in DCB mode, properly
 * configured), zeroes the per-port and per-stream statistics, optionally
 * runs one TXONLY burst first, then launches the configured forwarding
 * engine on every forwarding lcore.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done == 0 means a forwarding run is already in progress */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	if(dcb_test) {
		/* DCB mode requires every forwarding port to be configured
		 * for DCB and more than one forwarding core.
		 */
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	/* Drop stale packets left in the RX queues, unless disabled. */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	rxtx_config_display();

	/* Snapshot current HW stats into port->stats so that the final
	 * report only shows the delta of this run.
	 */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Reset all per-stream software counters. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		/* Run the TXONLY engine for a single burst on each core to
		 * prime loopback flows, then wait for it to complete before
		 * launching the real forwarding engine.
		 */
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1059
1060 void
1061 stop_packet_forwarding(void)
1062 {
1063         struct rte_eth_stats stats;
1064         struct rte_port *port;
1065         port_fwd_end_t  port_fwd_end;
1066         int i;
1067         portid_t   pt_id;
1068         streamid_t sm_id;
1069         lcoreid_t  lc_id;
1070         uint64_t total_recv;
1071         uint64_t total_xmit;
1072         uint64_t total_rx_dropped;
1073         uint64_t total_tx_dropped;
1074         uint64_t total_rx_nombuf;
1075         uint64_t tx_dropped;
1076         uint64_t rx_bad_ip_csum;
1077         uint64_t rx_bad_l4_csum;
1078 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1079         uint64_t fwd_cycles;
1080 #endif
1081         static const char *acc_stats_border = "+++++++++++++++";
1082
1083         if (all_ports_started() == 0) {
1084                 printf("Not all ports were started\n");
1085                 return;
1086         }
1087         if (test_done) {
1088                 printf("Packet forwarding not started\n");
1089                 return;
1090         }
1091         printf("Telling cores to stop...");
1092         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1093                 fwd_lcores[lc_id]->stopped = 1;
1094         printf("\nWaiting for lcores to finish...\n");
1095         rte_eal_mp_wait_lcore();
1096         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1097         if (port_fwd_end != NULL) {
1098                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1099                         pt_id = fwd_ports_ids[i];
1100                         (*port_fwd_end)(pt_id);
1101                 }
1102         }
1103 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1104         fwd_cycles = 0;
1105 #endif
1106         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1107                 if (cur_fwd_config.nb_fwd_streams >
1108                     cur_fwd_config.nb_fwd_ports) {
1109                         fwd_stream_stats_display(sm_id);
1110                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1111                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1112                 } else {
1113                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1114                                 fwd_streams[sm_id];
1115                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1116                                 fwd_streams[sm_id];
1117                 }
1118                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1119                 tx_dropped = (uint64_t) (tx_dropped +
1120                                          fwd_streams[sm_id]->fwd_dropped);
1121                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1122
1123                 rx_bad_ip_csum =
1124                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1125                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1126                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1127                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1128                                                         rx_bad_ip_csum;
1129
1130                 rx_bad_l4_csum =
1131                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1132                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1133                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1134                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1135                                                         rx_bad_l4_csum;
1136
1137 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1138                 fwd_cycles = (uint64_t) (fwd_cycles +
1139                                          fwd_streams[sm_id]->core_cycles);
1140 #endif
1141         }
1142         total_recv = 0;
1143         total_xmit = 0;
1144         total_rx_dropped = 0;
1145         total_tx_dropped = 0;
1146         total_rx_nombuf  = 0;
1147         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1148                 pt_id = fwd_ports_ids[i];
1149
1150                 port = &ports[pt_id];
1151                 rte_eth_stats_get(pt_id, &stats);
1152                 stats.ipackets -= port->stats.ipackets;
1153                 port->stats.ipackets = 0;
1154                 stats.opackets -= port->stats.opackets;
1155                 port->stats.opackets = 0;
1156                 stats.ibytes   -= port->stats.ibytes;
1157                 port->stats.ibytes = 0;
1158                 stats.obytes   -= port->stats.obytes;
1159                 port->stats.obytes = 0;
1160                 stats.ierrors  -= port->stats.ierrors;
1161                 port->stats.ierrors = 0;
1162                 stats.oerrors  -= port->stats.oerrors;
1163                 port->stats.oerrors = 0;
1164                 stats.rx_nombuf -= port->stats.rx_nombuf;
1165                 port->stats.rx_nombuf = 0;
1166                 stats.fdirmatch -= port->stats.fdirmatch;
1167                 port->stats.rx_nombuf = 0;
1168                 stats.fdirmiss -= port->stats.fdirmiss;
1169                 port->stats.rx_nombuf = 0;
1170
1171                 total_recv += stats.ipackets;
1172                 total_xmit += stats.opackets;
1173                 total_rx_dropped += stats.ierrors;
1174                 total_tx_dropped += port->tx_dropped;
1175                 total_rx_nombuf  += stats.rx_nombuf;
1176
1177                 fwd_port_stats_display(pt_id, &stats);
1178         }
1179         printf("\n  %s Accumulated forward statistics for all ports"
1180                "%s\n",
1181                acc_stats_border, acc_stats_border);
1182         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1183                "%-"PRIu64"\n"
1184                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1185                "%-"PRIu64"\n",
1186                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1187                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1188         if (total_rx_nombuf > 0)
1189                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1190         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1191                "%s\n",
1192                acc_stats_border, acc_stats_border);
1193 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1194         if (total_recv > 0)
1195                 printf("\n  CPU cycles/packet=%u (total cycles="
1196                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1197                        (unsigned int)(fwd_cycles / total_recv),
1198                        fwd_cycles, total_recv);
1199 #endif
1200         printf("\nDone.\n");
1201         test_done = 1;
1202 }
1203
1204 static int
1205 all_ports_started(void)
1206 {
1207         portid_t pi;
1208         struct rte_port *port;
1209
1210         for (pi = 0; pi < nb_ports; pi++) {
1211                 port = &ports[pi];
1212                 /* Check if there is a port which is not started */
1213                 if (port->port_status != RTE_PORT_STARTED)
1214                         return 0;
1215         }
1216
1217         /* No port is not started */
1218         return 1;
1219 }
1220
/*
 * Start port pid, or every port when pid >= nb_ports (RTE_PORT_ALL).
 *
 * When flagged as needed, the device and/or its RX/TX queues are
 * (re)configured before starting. Each started port moves through the
 * RTE_PORT_STOPPED -> RTE_PORT_HANDLING -> RTE_PORT_STARTED states.
 * A link-status check is run when at least one port was started.
 *
 * Returns 0 on success, -1 on a configuration failure.
 */
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = 0;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;

	/* test_done == 0 means forwarding is running */
	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return -1;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return -1;
	}

	if(dcb_config)
		dcb_test = 1;
	for (pi = 0; pi < nb_ports; pi++) {
		/* pid >= nb_ports selects all ports; otherwise match pid */
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		/* Atomically claim the port; skip it if it is not stopped. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/* Honor a per-ring NUMA override when set,
				 * otherwise use the port's own socket.
				 */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					/* The RX queue needs a mbuf pool on
					 * its configured NUMA socket.
					 */
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							"on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,rxring_numa[pi],
					     &(port->rx_conf),mp);
				}
				else
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,port->socket_id,
					     &(port->rx_conf),
					     mbuf_pool_find(port->socket_id));

				if (diag == 0)
					continue;


				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
	else
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
1367
1368 void
1369 stop_port(portid_t pid)
1370 {
1371         portid_t pi;
1372         struct rte_port *port;
1373         int need_check_link_status = 0;
1374
1375         if (test_done == 0) {
1376                 printf("Please stop forwarding first\n");
1377                 return;
1378         }
1379         if (dcb_test) {
1380                 dcb_test = 0;
1381                 dcb_config = 0;
1382         }
1383         printf("Stopping ports...\n");
1384
1385         for (pi = 0; pi < nb_ports; pi++) {
1386                 if (pid < nb_ports && pid != pi)
1387                         continue;
1388
1389                 port = &ports[pi];
1390                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1391                                                 RTE_PORT_HANDLING) == 0)
1392                         continue;
1393
1394                 rte_eth_dev_stop(pi);
1395
1396                 if (rte_atomic16_cmpset(&(port->port_status),
1397                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1398                         printf("Port %d can not be set into stopped\n", pi);
1399                 need_check_link_status = 1;
1400         }
1401         if (need_check_link_status)
1402                 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1403
1404         printf("Done\n");
1405 }
1406
1407 void
1408 close_port(portid_t pid)
1409 {
1410         portid_t pi;
1411         struct rte_port *port;
1412
1413         if (test_done == 0) {
1414                 printf("Please stop forwarding first\n");
1415                 return;
1416         }
1417
1418         printf("Closing ports...\n");
1419
1420         for (pi = 0; pi < nb_ports; pi++) {
1421                 if (pid < nb_ports && pid != pi)
1422                         continue;
1423
1424                 port = &ports[pi];
1425                 if (rte_atomic16_cmpset(&(port->port_status),
1426                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1427                         printf("Port %d is now not stopped\n", pi);
1428                         continue;
1429                 }
1430
1431                 rte_eth_dev_close(pi);
1432
1433                 if (rte_atomic16_cmpset(&(port->port_status),
1434                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1435                         printf("Port %d can not be set into stopped\n", pi);
1436         }
1437
1438         printf("Done\n");
1439 }
1440
1441 int
1442 all_ports_stopped(void)
1443 {
1444         portid_t pi;
1445         struct rte_port *port;
1446
1447         for (pi = 0; pi < nb_ports; pi++) {
1448                 port = &ports[pi];
1449                 if (port->port_status != RTE_PORT_STOPPED)
1450                         return 0;
1451         }
1452
1453         return 1;
1454 }
1455
1456 void
1457 pmd_test_exit(void)
1458 {
1459         portid_t pt_id;
1460
1461         for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1462                 printf("Stopping port %d...", pt_id);
1463                 fflush(stdout);
1464                 rte_eth_dev_close(pt_id);
1465                 printf("done\n");
1466         }
1467         printf("bye...\n");
1468 }
1469
/* Handler type for a parameter-less interactive test command. */
typedef void (*cmd_func_t)(void);

/* Binding of a command name to the function that implements it. */
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

/* Number of entries in the pmd_test_menu command table. */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1477
1478 /* Check the link status of all ports in up to 9s, and print them finally */
1479 static void
1480 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1481 {
1482 #define CHECK_INTERVAL 100 /* 100ms */
1483 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1484         uint8_t portid, count, all_ports_up, print_flag = 0;
1485         struct rte_eth_link link;
1486
1487         printf("Checking link statuses...\n");
1488         fflush(stdout);
1489         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1490                 all_ports_up = 1;
1491                 for (portid = 0; portid < port_num; portid++) {
1492                         if ((port_mask & (1 << portid)) == 0)
1493                                 continue;
1494                         memset(&link, 0, sizeof(link));
1495                         rte_eth_link_get_nowait(portid, &link);
1496                         /* print link status if flag set */
1497                         if (print_flag == 1) {
1498                                 if (link.link_status)
1499                                         printf("Port %d Link Up - speed %u "
1500                                                 "Mbps - %s\n", (uint8_t)portid,
1501                                                 (unsigned)link.link_speed,
1502                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1503                                         ("full-duplex") : ("half-duplex\n"));
1504                                 else
1505                                         printf("Port %d Link Down\n",
1506                                                 (uint8_t)portid);
1507                                 continue;
1508                         }
1509                         /* clear all_ports_up flag if any link down */
1510                         if (link.link_status == 0) {
1511                                 all_ports_up = 0;
1512                                 break;
1513                         }
1514                 }
1515                 /* after finally printing all link status, get out */
1516                 if (print_flag == 1)
1517                         break;
1518
1519                 if (all_ports_up == 0) {
1520                         fflush(stdout);
1521                         rte_delay_ms(CHECK_INTERVAL);
1522                 }
1523
1524                 /* set the print_flag if all ports up or timeout */
1525                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1526                         print_flag = 1;
1527                 }
1528         }
1529 }
1530
1531 static int
1532 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1533 {
1534         uint16_t i;
1535         int diag;
1536         uint8_t mapping_found = 0;
1537
1538         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1539                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1540                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1541                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1542                                         tx_queue_stats_mappings[i].queue_id,
1543                                         tx_queue_stats_mappings[i].stats_counter_id);
1544                         if (diag != 0)
1545                                 return diag;
1546                         mapping_found = 1;
1547                 }
1548         }
1549         if (mapping_found)
1550                 port->tx_queue_stats_mapping_enabled = 1;
1551         return 0;
1552 }
1553
1554 static int
1555 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1556 {
1557         uint16_t i;
1558         int diag;
1559         uint8_t mapping_found = 0;
1560
1561         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1562                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1563                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1564                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1565                                         rx_queue_stats_mappings[i].queue_id,
1566                                         rx_queue_stats_mappings[i].stats_counter_id);
1567                         if (diag != 0)
1568                                 return diag;
1569                         mapping_found = 1;
1570                 }
1571         }
1572         if (mapping_found)
1573                 port->rx_queue_stats_mapping_enabled = 1;
1574         return 0;
1575 }
1576
1577 static void
1578 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1579 {
1580         int diag = 0;
1581
1582         diag = set_tx_queue_stats_mapping_registers(pi, port);
1583         if (diag != 0) {
1584                 if (diag == -ENOTSUP) {
1585                         port->tx_queue_stats_mapping_enabled = 0;
1586                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1587                 }
1588                 else
1589                         rte_exit(EXIT_FAILURE,
1590                                         "set_tx_queue_stats_mapping_registers "
1591                                         "failed for port id=%d diag=%d\n",
1592                                         pi, diag);
1593         }
1594
1595         diag = set_rx_queue_stats_mapping_registers(pi, port);
1596         if (diag != 0) {
1597                 if (diag == -ENOTSUP) {
1598                         port->rx_queue_stats_mapping_enabled = 0;
1599                         printf("RX queue stats mapping not supported port id=%d\n", pi);
1600                 }
1601                 else
1602                         rte_exit(EXIT_FAILURE,
1603                                         "set_rx_queue_stats_mapping_registers "
1604                                         "failed for port id=%d diag=%d\n",
1605                                         pi, diag);
1606         }
1607 }
1608
1609 void
1610 init_port_config(void)
1611 {
1612         portid_t pid;
1613         struct rte_port *port;
1614
1615         for (pid = 0; pid < nb_ports; pid++) {
1616                 port = &ports[pid];
1617                 port->dev_conf.rxmode = rx_mode;
1618                 port->dev_conf.fdir_conf = fdir_conf;
1619                 if (nb_rxq > 1) {
1620                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1621                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1622                 } else {
1623                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1624                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1625                 }
1626
1627                 /* In SR-IOV mode, RSS mode is not available */
1628                 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1629                         if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1630                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1631                         else
1632                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;        
1633                 }
1634
1635                 port->rx_conf.rx_thresh = rx_thresh;
1636                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1637                 port->rx_conf.rx_drop_en = rx_drop_en;
1638                 port->tx_conf.tx_thresh = tx_thresh;
1639                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1640                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1641                 port->tx_conf.txq_flags = txq_flags;
1642
1643                 rte_eth_macaddr_get(pid, &port->eth_addr);
1644
1645                 map_port_queue_stats_mapping_registers(pid, port);
1646 #ifdef RTE_NIC_BYPASS
1647                 rte_eth_dev_bypass_init(pid);
1648 #endif
1649         }
1650 }
1651
/*
 * VLAN tag identifiers used by the DCB/VMDQ test configuration: each tag
 * is mapped to a queue pool in get_eth_dcb_conf() and installed in the
 * port's VLAN filter by init_port_dcb_config().
 */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
1658
/*
 * Fill @eth_conf with the RX/TX multi-queue configuration matching the
 * DCB setup described by @dcb_conf.
 *
 * Two layouts are produced:
 *  - DCB_VT_ENABLED: VMDQ+DCB, pool count derived from the number of
 *    traffic classes, with the vlan_tags[] table mapped onto the pools;
 *  - otherwise: plain DCB, additionally selecting the global
 *    dcb_q_mapping scheme for 4 or 8 traffic classes.
 *
 * @return 0 on success (no failure paths in the current implementation).
 *
 * NOTE(review): the local conf structs are not zero-initialized before
 * being copied wholesale into @eth_conf; any fields not explicitly set
 * below would carry stack garbage — confirm against the struct
 * definitions for this DPDK version.
 */
static  int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf.enable_default_pool = 0;
		vmdq_rx_conf.default_pool = 0;
		/* 4 TCs allow 32 pools, 8 TCs only 16. */
		vmdq_rx_conf.nb_queue_pools =
			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf.nb_queue_pools =
			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		/* Distribute the vlan_tags[] entries round-robin over the pools. */
		vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
		}
		/* Identity mapping: user priority i goes to DCB queue i. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf.dcb_queue[i] = i;
			vmdq_tx_conf.dcb_queue[i] = i;
		}

		/*set DCB mode of RX and TX of multiple queues*/
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
				sizeof(struct rte_eth_vmdq_dcb_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
				sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
	}
	else {
		struct rte_eth_dcb_rx_conf rx_conf;
		struct rte_eth_dcb_tx_conf tx_conf;

		/* queue mapping configuration of DCB RX and TX */
		if (dcb_conf->num_tcs == ETH_4_TCS)
			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
		else
			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;

		rx_conf.nb_tcs = dcb_conf->num_tcs;
		tx_conf.nb_tcs = dcb_conf->num_tcs;

		/* Identity mapping: user priority i goes to DCB queue i. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
			rx_conf.dcb_queue[i] = i;
			tx_conf.dcb_queue[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
				sizeof(struct rte_eth_dcb_rx_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
				sizeof(struct rte_eth_dcb_tx_conf)));
	}

	return 0;
}
1735
1736 int
1737 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1738 {
1739         struct rte_eth_conf port_conf;
1740         struct rte_port *rte_port;
1741         int retval;
1742         uint16_t nb_vlan;
1743         uint16_t i;
1744  
1745         /* rxq and txq configuration in dcb mode */
1746         nb_rxq = 128;
1747         nb_txq = 128;
1748         rx_free_thresh = 64;
1749  
1750         memset(&port_conf,0,sizeof(struct rte_eth_conf));
1751         /* Enter DCB configuration status */
1752         dcb_config = 1;
1753  
1754         nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1755         /*set configuration of DCB in vt mode and DCB in non-vt mode*/
1756         retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1757         if (retval < 0)
1758                 return retval;
1759  
1760         rte_port = &ports[pid];
1761         memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1762  
1763         rte_port->rx_conf.rx_thresh = rx_thresh;
1764         rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1765         rte_port->tx_conf.tx_thresh = tx_thresh;
1766         rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1767         rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1768         /* VLAN filter */
1769         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1770         for (i = 0; i < nb_vlan; i++){
1771                 rx_vft_set(pid, vlan_tags[i], 1);
1772         }
1773  
1774         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1775         map_port_queue_stats_mapping_registers(pid, rte_port);
1776
1777         rte_port->dcb_flag = 1;
1778  
1779         return 0;
1780 }
1781
#ifdef RTE_EXEC_ENV_BAREMETAL
/* NOTE(review): on bare-metal targets main is renamed to _main —
 * presumably the bare-metal runtime provides the real entry point and
 * invokes _main itself; confirm against the EAL bare-metal support. */
#define main _main
#endif
1785
1786 int
1787 main(int argc, char** argv)
1788 {
1789         int  diag;
1790         uint8_t port_id;
1791
1792         diag = rte_eal_init(argc, argv);
1793         if (diag < 0)
1794                 rte_panic("Cannot init EAL\n");
1795
1796         if (rte_pmd_init_all())
1797                 rte_panic("Cannot init PMD\n");
1798
1799         if (rte_eal_pci_probe())
1800                 rte_panic("Cannot probe PCI\n");
1801
1802         nb_ports = (portid_t) rte_eth_dev_count();
1803         if (nb_ports == 0)
1804                 rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1805                                                         "check that "
1806                           "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1807                           "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1808                           "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1809                           "configuration file\n");
1810
1811         set_def_fwd_config();
1812         if (nb_lcores == 0)
1813                 rte_panic("Empty set of forwarding logical cores - check the "
1814                           "core mask supplied in the command parameters\n");
1815
1816         argc -= diag;
1817         argv += diag;
1818         if (argc > 1)
1819                 launch_args_parse(argc, argv);
1820
1821         if (nb_rxq > nb_txq)
1822                 printf("Warning: nb_rxq=%d enables RSS configuration, "
1823                        "but nb_txq=%d will prevent to fully test it.\n",
1824                        nb_rxq, nb_txq);
1825
1826         init_config();
1827         if (start_port(RTE_PORT_ALL) != 0)
1828                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
1829
1830         /* set all ports to promiscuous mode by default */
1831         for (port_id = 0; port_id < nb_ports; port_id++)
1832                 rte_eth_promiscuous_enable(port_id);
1833
1834 #ifdef RTE_LIBRTE_CMDLINE
1835         if (interactive == 1) {
1836                 if (auto_start) {
1837                         printf("Start automatic packet forwarding\n");
1838                         start_packet_forwarding(0);
1839                 }
1840                 prompt();
1841         } else
1842 #endif
1843         {
1844                 char c;
1845                 int rc;
1846
1847                 printf("No commandline core given, start packet forwarding\n");
1848                 start_packet_forwarding(0);
1849                 printf("Press enter to exit\n");
1850                 rc = read(0, &c, 1);
1851                 if (rc < 0)
1852                         return 1;
1853         }
1854
1855         return 0;
1856 }