5d99447c8b1e9666faf0fff7babdd7dd08781ff0
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  * 
4  *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  * 
7  *   Redistribution and use in source and binary forms, with or without 
8  *   modification, are permitted provided that the following conditions 
9  *   are met:
10  * 
11  *     * Redistributions of source code must retain the above copyright 
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright 
14  *       notice, this list of conditions and the following disclaimer in 
15  *       the documentation and/or other materials provided with the 
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its 
18  *       contributors may be used to endorse or promote products derived 
19  *       from this software without specific prior written permission.
20  * 
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  * 
33  */
34
35 #include <stdarg.h>
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <signal.h>
39 #include <string.h>
40 #include <time.h>
41 #include <fcntl.h>
42 #include <sys/types.h>
43 #include <errno.h>
44
45 #include <sys/queue.h>
46 #include <sys/stat.h>
47
48 #include <stdint.h>
49 #include <unistd.h>
50 #include <inttypes.h>
51
52 #include <rte_common.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_tailq.h>
62 #include <rte_eal.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_ring.h>
68 #include <rte_mempool.h>
69 #include <rte_malloc.h>
70 #include <rte_mbuf.h>
71 #include <rte_interrupts.h>
72 #include <rte_pci.h>
73 #include <rte_ether.h>
74 #include <rte_ethdev.h>
75 #include <rte_string_fns.h>
76
77 #include "testpmd.h"
78
79 uint16_t verbose_level = 0; /**< Silent by default. */
80
81 /* use master core for command line ? */
82 uint8_t interactive = 0;
83
84 /*
85  * NUMA support configuration.
86  * When set, the NUMA support attempts to dispatch the allocation of the
87  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
88  * probed ports among the CPU sockets 0 and 1.
89  * Otherwise, all memory is allocated from CPU socket 0.
90  */
91 uint8_t numa_support = 0; /**< No numa support by default */
92
93 /*
94  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
95  * not configured.
96  */
97 uint8_t socket_num = UMA_NO_CONFIG; 
98
99 /*
100  * Record the Ethernet address of peer target ports to which packets are
101  * forwarded.
102  * Must be instantiated with the ethernet addresses of peer traffic generator
103  * ports.
104  */
105 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
106 portid_t nb_peer_eth_addrs = 0;
107
108 /*
109  * Probed Target Environment.
110  */
111 struct rte_port *ports;        /**< For all probed ethernet ports. */
112 portid_t nb_ports;             /**< Number of probed ethernet ports. */
113 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
114 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
115
116 /*
117  * Test Forwarding Configuration.
118  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
119  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
120  */
121 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
122 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
123 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
124 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
125
126 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
127 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
128
129 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
130 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
131
132 /*
133  * Forwarding engines.
134  */
135 struct fwd_engine * fwd_engines[] = {
136         &io_fwd_engine,
137         &mac_fwd_engine,
138         &rx_only_engine,
139         &tx_only_engine,
140         &csum_fwd_engine,
141 #ifdef RTE_LIBRTE_IEEE1588
142         &ieee1588_fwd_engine,
143 #endif
144         NULL,
145 };
146
147 struct fwd_config cur_fwd_config;
148 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
149
150 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
151 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
152                                       * specified on command-line. */
153
154 /*
155  * Configuration of packet segments used by the "txonly" processing engine.
156  */
157 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
158 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
159         TXONLY_DEF_PACKET_LEN,
160 };
161 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
162
163 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
164 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
165
166 /* current configuration is in DCB or not, 0 means it is not in DCB mode */
167 uint8_t dcb_config = 0;
168  
169 /* Whether the dcb is in testing status */
170 uint8_t dcb_test = 0;
171  
172 /* DCB on and VT on mapping is default */
173 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
174
175 /*
176  * Configurable number of RX/TX queues.
177  */
178 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
179 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
180
181 /*
182  * Configurable number of RX/TX ring descriptors.
183  */
184 #define RTE_TEST_RX_DESC_DEFAULT 128
185 #define RTE_TEST_TX_DESC_DEFAULT 512
186 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
187 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
188
189 /*
190  * Configurable values of RX and TX ring threshold registers.
191  */
192 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
193 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
194 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
195
196 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
197 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
198 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
199
200 struct rte_eth_thresh rx_thresh = {
201         .pthresh = RX_PTHRESH,
202         .hthresh = RX_HTHRESH,
203         .wthresh = RX_WTHRESH,
204 };
205
206 struct rte_eth_thresh tx_thresh = {
207         .pthresh = TX_PTHRESH,
208         .hthresh = TX_HTHRESH,
209         .wthresh = TX_WTHRESH,
210 };
211
212 /*
213  * Configurable value of RX free threshold.
214  */
215 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
216
217 /*
218  * Configurable value of RX drop enable.
219  */
220 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
221
222 /*
223  * Configurable value of TX free threshold.
224  */
225 uint16_t tx_free_thresh = 0; /* Use default values. */
226
227 /*
228  * Configurable value of TX RS bit threshold.
229  */
230 uint16_t tx_rs_thresh = 0; /* Use default values. */
231
232 /*
233  * Configurable value of TX queue flags.
234  */
235 uint32_t txq_flags = 0; /* No flags set. */
236
237 /*
238  * Receive Side Scaling (RSS) configuration.
239  */
240 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
241
242 /*
243  * Port topology configuration
244  */
245 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
246
247 /*
248  * Ethernet device configuration.
249  */
250 struct rte_eth_rxmode rx_mode = {
251         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
252         .split_hdr_size = 0,
253         .header_split   = 0, /**< Header Split disabled. */
254         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
255         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
256         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
257         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
258         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
259         .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
260 };
261
262 struct rte_fdir_conf fdir_conf = {
263         .mode = RTE_FDIR_MODE_NONE,
264         .pballoc = RTE_FDIR_PBALLOC_64K,
265         .status = RTE_FDIR_REPORT_STATUS,
266         .flexbytes_offset = 0x6,
267         .drop_queue = 127,
268 };
269
270 static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
271
272 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
273 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
274
275 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
276 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
277
278 uint16_t nb_tx_queue_stats_mappings = 0;
279 uint16_t nb_rx_queue_stats_mappings = 0;
280
281 /* Forward function declarations */
282 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
283 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
284
285 /*
286  * Check if all the ports are started.
287  * If yes, return positive value. If not, return zero.
288  */
289 static int all_ports_started(void);
290
291 /*
292  * Setup default configuration.
293  */
294 static void
295 set_default_fwd_lcores_config(void)
296 {
297         unsigned int i;
298         unsigned int nb_lc;
299
300         nb_lc = 0;
301         for (i = 0; i < RTE_MAX_LCORE; i++) {
302                 if (! rte_lcore_is_enabled(i))
303                         continue;
304                 if (i == rte_get_master_lcore())
305                         continue;
306                 fwd_lcores_cpuids[nb_lc++] = i;
307         }
308         nb_lcores = (lcoreid_t) nb_lc;
309         nb_cfg_lcores = nb_lcores;
310         nb_fwd_lcores = 1;
311 }
312
313 static void
314 set_def_peer_eth_addrs(void)
315 {
316         portid_t i;
317
318         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
319                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
320                 peer_eth_addrs[i].addr_bytes[5] = i;
321         }
322 }
323
324 static void
325 set_default_fwd_ports_config(void)
326 {
327         portid_t pt_id;
328
329         for (pt_id = 0; pt_id < nb_ports; pt_id++)
330                 fwd_ports_ids[pt_id] = pt_id;
331
332         nb_cfg_ports = nb_ports;
333         nb_fwd_ports = nb_ports;
334 }
335
336 void
337 set_def_fwd_config(void)
338 {
339         set_default_fwd_lcores_config();
340         set_def_peer_eth_addrs();
341         set_default_fwd_ports_config();
342 }
343
344 /*
345  * Configuration initialisation done once at init time.
346  */
347 struct mbuf_ctor_arg {
348         uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
349         uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
350 };
351
352 struct mbuf_pool_ctor_arg {
353         uint16_t seg_buf_size; /**< size of data segment in mbuf. */
354 };
355
356 static void
357 testpmd_mbuf_ctor(struct rte_mempool *mp,
358                   void *opaque_arg,
359                   void *raw_mbuf,
360                   __attribute__((unused)) unsigned i)
361 {
362         struct mbuf_ctor_arg *mb_ctor_arg;
363         struct rte_mbuf    *mb;
364
365         mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
366         mb = (struct rte_mbuf *) raw_mbuf;
367
368         mb->type         = RTE_MBUF_PKT;
369         mb->pool         = mp;
370         mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
371         mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
372                         mb_ctor_arg->seg_buf_offset);
373         mb->buf_len      = mb_ctor_arg->seg_buf_size;
374         mb->type         = RTE_MBUF_PKT;
375         mb->ol_flags     = 0;
376         mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
377         mb->pkt.nb_segs  = 1;
378         mb->pkt.vlan_macip.data = 0;
379         mb->pkt.hash.rss = 0;
380 }
381
382 static void
383 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
384                        void *opaque_arg)
385 {
386         struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
387         struct rte_pktmbuf_pool_private *mbp_priv;
388
389         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
390                 printf("%s(%s) private_data_size %d < %d\n",
391                        __func__, mp->name, (int) mp->private_data_size,
392                        (int) sizeof(struct rte_pktmbuf_pool_private));
393                 return;
394         }
395         mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
396         mbp_priv = (struct rte_pktmbuf_pool_private *)
397                 ((char *)mp + sizeof(struct rte_mempool));
398         mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
399 }
400
401 static void
402 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
403                  unsigned int socket_id)
404 {
405         char pool_name[RTE_MEMPOOL_NAMESIZE];
406         struct rte_mempool *rte_mp;
407         struct mbuf_pool_ctor_arg mbp_ctor_arg;
408         struct mbuf_ctor_arg mb_ctor_arg;
409         uint32_t mb_size;
410
411         mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
412                                                 mbuf_seg_size);
413         mb_ctor_arg.seg_buf_offset =
414                 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
415         mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
416         mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
417         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
418         rte_mp = rte_mempool_create(pool_name, nb_mbuf, (unsigned) mb_size,
419                                     (unsigned) mb_mempool_cache,
420                                     sizeof(struct rte_pktmbuf_pool_private),
421                                     testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
422                                     testpmd_mbuf_ctor, &mb_ctor_arg,
423                                     socket_id, 0);
424         if (rte_mp == NULL) {
425                 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
426                                                 "failed\n", socket_id);
427         }
428 }
429
430 static void
431 init_config(void)
432 {
433         portid_t pid;
434         struct rte_port *port;
435         struct rte_mempool *mbp;
436         unsigned int nb_mbuf_per_pool;
437         lcoreid_t  lc_id;
438         uint8_t port_per_socket[MAX_SOCKET];
439
440         memset(port_per_socket,0,MAX_SOCKET);
441         /* Configuration of logical cores. */
442         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
443                                 sizeof(struct fwd_lcore *) * nb_lcores,
444                                 CACHE_LINE_SIZE);
445         if (fwd_lcores == NULL) {
446                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
447                                                         "failed\n", nb_lcores);
448         }
449         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
450                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
451                                                sizeof(struct fwd_lcore),
452                                                CACHE_LINE_SIZE);
453                 if (fwd_lcores[lc_id] == NULL) {
454                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
455                                                                 "failed\n");
456                 }
457                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
458         }
459
460         /*
461          * Create pools of mbuf.
462          * If NUMA support is disabled, create a single pool of mbuf in
463          * socket 0 memory by default.
464          * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
465          *
466          * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
467          * nb_txd can be configured at run time.
468          */
469         if (param_total_num_mbufs) 
470                 nb_mbuf_per_pool = param_total_num_mbufs;
471         else {
472                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
473                                 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
474                 
475                 if (!numa_support) 
476                         nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
477         }
478
479         if (!numa_support) {
480                 if (socket_num == UMA_NO_CONFIG)
481                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
482                 else
483                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
484                                                  socket_num);
485         }
486         /*
487          * Records which Mbuf pool to use by each logical core, if needed.
488          */
489         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
490                 mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
491                 if (mbp == NULL)
492                         mbp = mbuf_pool_find(0);
493                 fwd_lcores[lc_id]->mbp = mbp;
494         }
495
496         /* Configuration of Ethernet ports. */
497         ports = rte_zmalloc("testpmd: ports",
498                             sizeof(struct rte_port) * nb_ports,
499                             CACHE_LINE_SIZE);
500         if (ports == NULL) {
501                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
502                                                         "failed\n", nb_ports);
503         }
504         
505         for (pid = 0; pid < nb_ports; pid++) {
506                 port = &ports[pid];
507                 rte_eth_dev_info_get(pid, &port->dev_info);
508
509                 if (numa_support) {
510                         if (port_numa[pid] != NUMA_NO_CONFIG) 
511                                 port_per_socket[port_numa[pid]]++;
512                         else {
513                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
514                                 port_per_socket[socket_id]++; 
515                         }
516                 }
517
518                 /* set flag to initialize port/queue */
519                 port->need_reconfig = 1;
520                 port->need_reconfig_queues = 1;
521         }
522
523         if (numa_support) {
524                 uint8_t i;
525                 unsigned int nb_mbuf;
526
527                 if (param_total_num_mbufs)
528                         nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
529
530                 for (i = 0; i < MAX_SOCKET; i++) {
531                         nb_mbuf = (nb_mbuf_per_pool * 
532                                                 port_per_socket[i]);
533                         if (nb_mbuf) 
534                                 mbuf_pool_create(mbuf_data_size,
535                                                 nb_mbuf,i);
536                 }
537         }
538         init_port_config();
539         /* Configuration of packet forwarding streams. */
540         if (init_fwd_streams() < 0)
541                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
542 }
543
544 int
545 init_fwd_streams(void)
546 {
547         portid_t pid;
548         struct rte_port *port;
549         streamid_t sm_id, nb_fwd_streams_new;
550
551         /* set socket id according to numa or not */
552         for (pid = 0; pid < nb_ports; pid++) {
553                 port = &ports[pid];
554                 if (nb_rxq > port->dev_info.max_rx_queues) {
555                         printf("Fail: nb_rxq(%d) is greater than "
556                                 "max_rx_queues(%d)\n", nb_rxq,
557                                 port->dev_info.max_rx_queues);
558                         return -1;
559                 }
560                 if (nb_txq > port->dev_info.max_tx_queues) {
561                         printf("Fail: nb_txq(%d) is greater than "
562                                 "max_tx_queues(%d)\n", nb_txq,
563                                 port->dev_info.max_tx_queues);
564                         return -1;
565                 }
566                 if (numa_support) 
567                         port->socket_id = rte_eth_dev_socket_id(pid);
568                 else {
569                         if (socket_num == UMA_NO_CONFIG)         
570                                 port->socket_id = 0;
571                         else 
572                                 port->socket_id = socket_num;   
573                 }
574         }
575
576         nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
577         if (nb_fwd_streams_new == nb_fwd_streams)
578                 return 0;
579         /* clear the old */
580         if (fwd_streams != NULL) {
581                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
582                         if (fwd_streams[sm_id] == NULL)
583                                 continue;
584                         rte_free(fwd_streams[sm_id]);
585                         fwd_streams[sm_id] = NULL;
586                 }
587                 rte_free(fwd_streams);
588                 fwd_streams = NULL;
589         }
590
591         /* init new */
592         nb_fwd_streams = nb_fwd_streams_new;
593         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
594                 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
595         if (fwd_streams == NULL)
596                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
597                                                 "failed\n", nb_fwd_streams);
598
599         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
600                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
601                                 sizeof(struct fwd_stream), CACHE_LINE_SIZE);
602                 if (fwd_streams[sm_id] == NULL)
603                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
604                                                                 " failed\n");
605         }
606
607         return 0;
608 }
609
610 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
611 static void
612 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
613 {
614         unsigned int total_burst;
615         unsigned int nb_burst;
616         unsigned int burst_stats[3];
617         uint16_t pktnb_stats[3];
618         uint16_t nb_pkt;
619         int burst_percent[3];
620
621         /*
622          * First compute the total number of packet bursts and the
623          * two highest numbers of bursts of the same number of packets.
624          */
625         total_burst = 0;
626         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
627         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
628         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
629                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
630                 if (nb_burst == 0)
631                         continue;
632                 total_burst += nb_burst;
633                 if (nb_burst > burst_stats[0]) {
634                         burst_stats[1] = burst_stats[0];
635                         pktnb_stats[1] = pktnb_stats[0];
636                         burst_stats[0] = nb_burst;
637                         pktnb_stats[0] = nb_pkt;
638                 }
639         }
640         if (total_burst == 0)
641                 return;
642         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
643         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
644                burst_percent[0], (int) pktnb_stats[0]);
645         if (burst_stats[0] == total_burst) {
646                 printf("]\n");
647                 return;
648         }
649         if (burst_stats[0] + burst_stats[1] == total_burst) {
650                 printf(" + %d%% of %d pkts]\n",
651                        100 - burst_percent[0], pktnb_stats[1]);
652                 return;
653         }
654         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
655         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
656         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
657                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
658                 return;
659         }
660         printf(" + %d%% of %d pkts + %d%% of others]\n",
661                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
662 }
663 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
664
665 static void
666 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
667 {
668         struct rte_port *port;
669         uint8_t i;
670
671         static const char *fwd_stats_border = "----------------------";
672
673         port = &ports[port_id];
674         printf("\n  %s Forward statistics for port %-2d %s\n",
675                fwd_stats_border, port_id, fwd_stats_border);
676
677         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
678                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
679                        "%-"PRIu64"\n",
680                        stats->ipackets, stats->ierrors,
681                        (uint64_t) (stats->ipackets + stats->ierrors));
682
683                 if (cur_fwd_eng == &csum_fwd_engine)
684                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
685                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
686
687                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
688                        "%-"PRIu64"\n",
689                        stats->opackets, port->tx_dropped,
690                        (uint64_t) (stats->opackets + port->tx_dropped));
691
692                 if (stats->rx_nombuf > 0)
693                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
694
695         }
696         else {
697                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
698                        "%14"PRIu64"\n",
699                        stats->ipackets, stats->ierrors,
700                        (uint64_t) (stats->ipackets + stats->ierrors));
701
702                 if (cur_fwd_eng == &csum_fwd_engine)
703                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
704                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
705
706                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
707                        "%14"PRIu64"\n",
708                        stats->opackets, port->tx_dropped,
709                        (uint64_t) (stats->opackets + port->tx_dropped));
710
711                 if (stats->rx_nombuf > 0)
712                         printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
713         }
714 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
715         if (port->rx_stream)
716                 pkt_burst_stats_display("RX",
717                         &port->rx_stream->rx_burst_stats);
718         if (port->tx_stream)
719                 pkt_burst_stats_display("TX",
720                         &port->tx_stream->tx_burst_stats);
721 #endif
722         /* stats fdir */
723         if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
724                 printf("  Fdirmiss:%14"PRIu64"    Fdirmatch:%14"PRIu64"\n",
725                        stats->fdirmiss,
726                        stats->fdirmatch);
727
728         if (port->rx_queue_stats_mapping_enabled) {
729                 printf("\n");
730                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
731                         printf("  Stats reg %2d RX-packets:%14"PRIu64
732                                "     RX-errors:%14"PRIu64
733                                "    RX-bytes:%14"PRIu64"\n",
734                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
735                 }
736                 printf("\n");
737         }
738         if (port->tx_queue_stats_mapping_enabled) {
739                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
740                         printf("  Stats reg %2d TX-packets:%14"PRIu64
741                                "                                 TX-bytes:%14"PRIu64"\n",
742                                i, stats->q_opackets[i], stats->q_obytes[i]);
743                 }
744         }
745
746         printf("  %s--------------------------------%s\n",
747                fwd_stats_border, fwd_stats_border);
748 }
749
750 static void
751 fwd_stream_stats_display(streamid_t stream_id)
752 {
753         struct fwd_stream *fs;
754         static const char *fwd_top_stats_border = "-------";
755
756         fs = fwd_streams[stream_id];
757         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
758             (fs->fwd_dropped == 0))
759                 return;
760         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
761                "TX Port=%2d/Queue=%2d %s\n",
762                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
763                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
764         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
765                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
766
767         /* if checksum mode */
768         if (cur_fwd_eng == &csum_fwd_engine) {
769                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
770                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
771         }
772
773 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
774         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
775         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
776 #endif
777 }
778
779 static void
780 flush_all_rx_queues(void)
781 {
782         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
783         portid_t  rxp;
784         queueid_t rxq;
785         uint16_t  nb_rx;
786         uint16_t  i;
787         uint8_t   j;
788
789         for (j = 0; j < 2; j++) {
790                 for (rxp = 0; rxp < nb_ports; rxp++) {
791                         for (rxq = 0; rxq < nb_rxq; rxq++) {
792                                 do {
793                                         nb_rx = rte_eth_rx_burst(rxp, rxq,
794                                                 pkts_burst, MAX_PKT_BURST);
795                                         for (i = 0; i < nb_rx; i++)
796                                                 rte_pktmbuf_free(pkts_burst[i]);
797                                 } while (nb_rx > 0);
798                         }
799                 }
800                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
801         }
802 }
803
804 static void
805 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
806 {
807         struct fwd_stream **fsm;
808         streamid_t nb_fs;
809         streamid_t sm_id;
810
811         fsm = &fwd_streams[fc->stream_idx];
812         nb_fs = fc->stream_nb;
813         do {
814                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
815                         (*pkt_fwd)(fsm[sm_id]);
816         } while (! fc->stopped);
817 }
818
819 static int
820 start_pkt_forward_on_core(void *fwd_arg)
821 {
822         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
823                              cur_fwd_config.fwd_eng->packet_fwd);
824         return 0;
825 }
826
827 /*
828  * Run the TXONLY packet forwarding engine to send a single burst of packets.
829  * Used to start communication flows in network loopback test configurations.
830  */
831 static int
832 run_one_txonly_burst_on_core(void *fwd_arg)
833 {
834         struct fwd_lcore *fwd_lc;
835         struct fwd_lcore tmp_lcore;
836
837         fwd_lc = (struct fwd_lcore *) fwd_arg;
838         tmp_lcore = *fwd_lc;
839         tmp_lcore.stopped = 1;
840         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
841         return 0;
842 }
843
844 /*
845  * Launch packet forwarding:
846  *     - Setup per-port forwarding context.
847  *     - launch logical cores with their forwarding configuration.
848  */
849 static void
850 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
851 {
852         port_fwd_begin_t port_fwd_begin;
853         unsigned int i;
854         unsigned int lc_id;
855         int diag;
856
857         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
858         if (port_fwd_begin != NULL) {
859                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
860                         (*port_fwd_begin)(fwd_ports_ids[i]);
861         }
862         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
863                 lc_id = fwd_lcores_cpuids[i];
864                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
865                         fwd_lcores[i]->stopped = 0;
866                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
867                                                      fwd_lcores[i], lc_id);
868                         if (diag != 0)
869                                 printf("launch lcore %u failed - diag=%d\n",
870                                        lc_id, diag);
871                 }
872         }
873 }
874
/*
 * Launch packet forwarding configuration.
 *
 * Resets all per-port and per-stream statistics, then launches the current
 * forwarding engine on the configured forwarding lcores.  When
 * "with_tx_first" is non-zero, a single TXONLY burst is first sent on every
 * forwarding port to bootstrap traffic in loopback setups.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	/* DCB spreads traffic classes across cores; one core is not enough. */
	if((dcb_test) && (nb_fwd_lcores == 1)) {
		printf("In DCB mode,the nb forwarding cores should be larger than 1.\n");
		return;
	}
	test_done = 0;
	/* Drop stale packets so they are not counted in this run. */
	flush_all_rx_queues();
	fwd_config_setup();
	rxtx_config_display();

	/* Snapshot current HW stats so the final report shows deltas only. */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Reset the software per-stream counters. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		/*
		 * Run the TXONLY engine once on all ports (begin / one burst
		 * per lcore / end) before starting the real forwarding run.
		 */
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
946
947 void
948 stop_packet_forwarding(void)
949 {
950         struct rte_eth_stats stats;
951         struct rte_port *port;
952         port_fwd_end_t  port_fwd_end;
953         int i;
954         portid_t   pt_id;
955         streamid_t sm_id;
956         lcoreid_t  lc_id;
957         uint64_t total_recv;
958         uint64_t total_xmit;
959         uint64_t total_rx_dropped;
960         uint64_t total_tx_dropped;
961         uint64_t total_rx_nombuf;
962         uint64_t tx_dropped;
963         uint64_t rx_bad_ip_csum;
964         uint64_t rx_bad_l4_csum;
965 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
966         uint64_t fwd_cycles;
967 #endif
968         static const char *acc_stats_border = "+++++++++++++++";
969
970         if (all_ports_started() == 0) {
971                 printf("Not all ports were started\n");
972                 return;
973         }
974         if (test_done) {
975                 printf("Packet forwarding not started\n");
976                 return;
977         }
978         printf("Telling cores to stop...");
979         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
980                 fwd_lcores[lc_id]->stopped = 1;
981         printf("\nWaiting for lcores to finish...\n");
982         rte_eal_mp_wait_lcore();
983         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
984         if (port_fwd_end != NULL) {
985                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
986                         pt_id = fwd_ports_ids[i];
987                         (*port_fwd_end)(pt_id);
988                 }
989         }
990 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
991         fwd_cycles = 0;
992 #endif
993         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
994                 if (cur_fwd_config.nb_fwd_streams >
995                     cur_fwd_config.nb_fwd_ports) {
996                         fwd_stream_stats_display(sm_id);
997                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
998                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
999                 } else {
1000                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1001                                 fwd_streams[sm_id];
1002                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1003                                 fwd_streams[sm_id];
1004                 }
1005                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1006                 tx_dropped = (uint64_t) (tx_dropped +
1007                                          fwd_streams[sm_id]->fwd_dropped);
1008                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1009
1010                 rx_bad_ip_csum =
1011                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1012                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1013                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1014                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1015                                                         rx_bad_ip_csum;
1016
1017                 rx_bad_l4_csum =
1018                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1019                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1020                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1021                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1022                                                         rx_bad_l4_csum;
1023
1024 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1025                 fwd_cycles = (uint64_t) (fwd_cycles +
1026                                          fwd_streams[sm_id]->core_cycles);
1027 #endif
1028         }
1029         total_recv = 0;
1030         total_xmit = 0;
1031         total_rx_dropped = 0;
1032         total_tx_dropped = 0;
1033         total_rx_nombuf  = 0;
1034         for (i = 0; i < ((cur_fwd_config.nb_fwd_ports + 1) & ~0x1); i++) {
1035                 pt_id = fwd_ports_ids[i];
1036
1037                 port = &ports[pt_id];
1038                 rte_eth_stats_get(pt_id, &stats);
1039                 stats.ipackets -= port->stats.ipackets;
1040                 port->stats.ipackets = 0;
1041                 stats.opackets -= port->stats.opackets;
1042                 port->stats.opackets = 0;
1043                 stats.ibytes   -= port->stats.ibytes;
1044                 port->stats.ibytes = 0;
1045                 stats.obytes   -= port->stats.obytes;
1046                 port->stats.obytes = 0;
1047                 stats.ierrors  -= port->stats.ierrors;
1048                 port->stats.ierrors = 0;
1049                 stats.oerrors  -= port->stats.oerrors;
1050                 port->stats.oerrors = 0;
1051                 stats.rx_nombuf -= port->stats.rx_nombuf;
1052                 port->stats.rx_nombuf = 0;
1053                 stats.fdirmatch -= port->stats.fdirmatch;
1054                 port->stats.rx_nombuf = 0;
1055                 stats.fdirmiss -= port->stats.fdirmiss;
1056                 port->stats.rx_nombuf = 0;
1057
1058                 total_recv += stats.ipackets;
1059                 total_xmit += stats.opackets;
1060                 total_rx_dropped += stats.ierrors;
1061                 total_tx_dropped += port->tx_dropped;
1062                 total_rx_nombuf  += stats.rx_nombuf;
1063
1064                 fwd_port_stats_display(pt_id, &stats);
1065         }
1066         printf("\n  %s Accumulated forward statistics for all ports"
1067                "%s\n",
1068                acc_stats_border, acc_stats_border);
1069         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1070                "%-"PRIu64"\n"
1071                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1072                "%-"PRIu64"\n",
1073                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1074                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1075         if (total_rx_nombuf > 0)
1076                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1077         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1078                "%s\n",
1079                acc_stats_border, acc_stats_border);
1080 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1081         if (total_recv > 0)
1082                 printf("\n  CPU cycles/packet=%u (total cycles="
1083                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1084                        (unsigned int)(fwd_cycles / total_recv),
1085                        fwd_cycles, total_recv);
1086 #endif
1087         printf("\nDone.\n");
1088         test_done = 1;
1089 }
1090
1091 static int
1092 all_ports_started(void)
1093 {
1094         portid_t pi;
1095         struct rte_port *port;
1096
1097         for (pi = 0; pi < nb_ports; pi++) {
1098                 port = &ports[pi];
1099                 /* Check if there is a port which is not started */
1100                 if (port->port_status != RTE_PORT_STARTED)
1101                         return 0;
1102         }
1103
1104         /* No port is not started */
1105         return 1;
1106 }
1107
/*
 * Configure (if needed) and start the port "pid", or every port when "pid"
 * is out of range (e.g. RTE_PORT_ALL).  A port is only handled when it can
 * be atomically moved from the STOPPED to the HANDLING state; on any
 * configuration failure the status is rolled back to STOPPED and the
 * relevant "need_reconfig*" flag is re-armed so a later call retries.
 */
void
start_port(portid_t pid)
{
	int diag, need_check_link_status = 0;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if(dcb_config)
		dcb_test = 1;
	for (pi = 0; pi < nb_ports; pi++) {
		/* pid >= nb_ports means "all ports"; otherwise filter. */
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		/* Claim the port: only a STOPPED port may be started. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %d)\n", pi,
					rte_eth_dev_socket_id(pi));
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/* Prefer the ring's NUMA node when configured. */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				/* RX queues also need a mbuf pool on the node. */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							"on the socket %d\n",
							rxring_numa[pi]);
						return;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,rxring_numa[pi],
					     &(port->rx_conf),mp);
				}
				else
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,port->socket_id,
					     &(port->rx_conf),
					     mbuf_pool_find(port->socket_id));

				if (diag == 0)
					continue;


				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Failed to start; roll the status back to STOPPED. */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
	else
		printf("Please stop the ports first\n");

	printf("Done\n");
}
1253
1254 void
1255 stop_port(portid_t pid)
1256 {
1257         portid_t pi;
1258         struct rte_port *port;
1259         int need_check_link_status = 0;
1260
1261         if (test_done == 0) {
1262                 printf("Please stop forwarding first\n");
1263                 return;
1264         }
1265         if (dcb_test) {
1266                 dcb_test = 0;
1267                 dcb_config = 0;
1268         }
1269         printf("Stopping ports...\n");
1270
1271         for (pi = 0; pi < nb_ports; pi++) {
1272                 if (pid < nb_ports && pid != pi)
1273                         continue;
1274
1275                 port = &ports[pi];
1276                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1277                                                 RTE_PORT_HANDLING) == 0)
1278                         continue;
1279
1280                 rte_eth_dev_stop(pi);
1281
1282                 if (rte_atomic16_cmpset(&(port->port_status),
1283                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1284                         printf("Port %d can not be set into stopped\n", pi);
1285                 need_check_link_status = 1;
1286         }
1287         if (need_check_link_status)
1288                 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1289
1290         printf("Done\n");
1291 }
1292
1293 void
1294 close_port(portid_t pid)
1295 {
1296         portid_t pi;
1297         struct rte_port *port;
1298
1299         if (test_done == 0) {
1300                 printf("Please stop forwarding first\n");
1301                 return;
1302         }
1303
1304         printf("Closing ports...\n");
1305
1306         for (pi = 0; pi < nb_ports; pi++) {
1307                 if (pid < nb_ports && pid != pi)
1308                         continue;
1309
1310                 port = &ports[pi];
1311                 if (rte_atomic16_cmpset(&(port->port_status),
1312                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1313                         printf("Port %d is now not stopped\n", pi);
1314                         continue;
1315                 }
1316
1317                 rte_eth_dev_close(pi);
1318
1319                 if (rte_atomic16_cmpset(&(port->port_status),
1320                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1321                         printf("Port %d can not be set into stopped\n", pi);
1322         }
1323
1324         printf("Done\n");
1325 }
1326
1327 int
1328 all_ports_stopped(void)
1329 {
1330         portid_t pi;
1331         struct rte_port *port;
1332
1333         for (pi = 0; pi < nb_ports; pi++) {
1334                 port = &ports[pi];
1335                 if (port->port_status != RTE_PORT_STOPPED)
1336                         return 0;
1337         }
1338
1339         return 1;
1340 }
1341
1342 void
1343 pmd_test_exit(void)
1344 {
1345         portid_t pt_id;
1346
1347         for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1348                 printf("Stopping port %d...", pt_id);
1349                 fflush(stdout);
1350                 rte_eth_dev_close(pt_id);
1351                 printf("done\n");
1352         }
1353         printf("bye...\n");
1354 }
1355
/* Prototype of a test-menu command handler. */
typedef void (*cmd_func_t)(void);
/* Associates a test-menu command name with its handler function. */
struct pmd_test_command {
	const char *cmd_name;	/* command name typed by the user */
	cmd_func_t cmd_func;	/* handler invoked for this command */
};
1361
1362 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1363
1364 /* Check the link status of all ports in up to 9s, and print them finally */
1365 static void
1366 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1367 {
1368 #define CHECK_INTERVAL 100 /* 100ms */
1369 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1370         uint8_t portid, count, all_ports_up, print_flag = 0;
1371         struct rte_eth_link link;
1372
1373         printf("Checking link statuses...\n");
1374         fflush(stdout);
1375         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1376                 all_ports_up = 1;
1377                 for (portid = 0; portid < port_num; portid++) {
1378                         if ((port_mask & (1 << portid)) == 0)
1379                                 continue;
1380                         memset(&link, 0, sizeof(link));
1381                         rte_eth_link_get_nowait(portid, &link);
1382                         /* print link status if flag set */
1383                         if (print_flag == 1) {
1384                                 if (link.link_status)
1385                                         printf("Port %d Link Up - speed %u "
1386                                                 "Mbps - %s\n", (uint8_t)portid,
1387                                                 (unsigned)link.link_speed,
1388                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1389                                         ("full-duplex") : ("half-duplex\n"));
1390                                 else
1391                                         printf("Port %d Link Down\n",
1392                                                 (uint8_t)portid);
1393                                 continue;
1394                         }
1395                         /* clear all_ports_up flag if any link down */
1396                         if (link.link_status == 0) {
1397                                 all_ports_up = 0;
1398                                 break;
1399                         }
1400                 }
1401                 /* after finally printing all link status, get out */
1402                 if (print_flag == 1)
1403                         break;
1404
1405                 if (all_ports_up == 0) {
1406                         fflush(stdout);
1407                         rte_delay_ms(CHECK_INTERVAL);
1408                 }
1409
1410                 /* set the print_flag if all ports up or timeout */
1411                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1412                         print_flag = 1;
1413                 }
1414         }
1415 }
1416
1417 static int
1418 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1419 {
1420         uint16_t i;
1421         int diag;
1422         uint8_t mapping_found = 0;
1423
1424         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1425                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1426                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1427                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1428                                         tx_queue_stats_mappings[i].queue_id,
1429                                         tx_queue_stats_mappings[i].stats_counter_id);
1430                         if (diag != 0)
1431                                 return diag;
1432                         mapping_found = 1;
1433                 }
1434         }
1435         if (mapping_found)
1436                 port->tx_queue_stats_mapping_enabled = 1;
1437         return 0;
1438 }
1439
1440 static int
1441 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1442 {
1443         uint16_t i;
1444         int diag;
1445         uint8_t mapping_found = 0;
1446
1447         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1448                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1449                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1450                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1451                                         rx_queue_stats_mappings[i].queue_id,
1452                                         rx_queue_stats_mappings[i].stats_counter_id);
1453                         if (diag != 0)
1454                                 return diag;
1455                         mapping_found = 1;
1456                 }
1457         }
1458         if (mapping_found)
1459                 port->rx_queue_stats_mapping_enabled = 1;
1460         return 0;
1461 }
1462
1463 static void
1464 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1465 {
1466         int diag = 0;
1467
1468         diag = set_tx_queue_stats_mapping_registers(pi, port);
1469         if (diag != 0) {
1470                 if (diag == -ENOTSUP) {
1471                         port->tx_queue_stats_mapping_enabled = 0;
1472                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1473                 }
1474                 else
1475                         rte_exit(EXIT_FAILURE,
1476                                         "set_tx_queue_stats_mapping_registers "
1477                                         "failed for port id=%d diag=%d\n",
1478                                         pi, diag);
1479         }
1480
1481         diag = set_rx_queue_stats_mapping_registers(pi, port);
1482         if (diag != 0) {
1483                 if (diag == -ENOTSUP) {
1484                         port->rx_queue_stats_mapping_enabled = 0;
1485                         printf("RX queue stats mapping not supported port id=%d\n", pi);
1486                 }
1487                 else
1488                         rte_exit(EXIT_FAILURE,
1489                                         "set_rx_queue_stats_mapping_registers "
1490                                         "failed for port id=%d diag=%d\n",
1491                                         pi, diag);
1492         }
1493 }
1494
1495 void
1496 init_port_config(void)
1497 {
1498         portid_t pid;
1499         struct rte_port *port;
1500
1501         for (pid = 0; pid < nb_ports; pid++) {
1502                 port = &ports[pid];
1503                 port->dev_conf.rxmode = rx_mode;
1504                 port->dev_conf.fdir_conf = fdir_conf;
1505                 if (nb_rxq > 0) {
1506                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1507                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1508                 } else {
1509                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1510                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1511                 }
1512                 port->rx_conf.rx_thresh = rx_thresh;
1513                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1514                 port->rx_conf.rx_drop_en = rx_drop_en;
1515                 port->tx_conf.tx_thresh = tx_thresh;
1516                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1517                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1518                 port->tx_conf.txq_flags = txq_flags;
1519
1520                 rte_eth_macaddr_get(pid, &port->eth_addr);
1521
1522                 map_port_queue_stats_mapping_registers(pid, port);
1523         }
1524 }
1525
/* VLAN tags mapped onto VMDQ pools by the DCB+VT configuration below. */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
1532
1533 static  int
1534 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1535 {
1536         uint8_t i;
1537  
1538         /*
1539          * Builds up the correct configuration for dcb+vt based on the vlan tags array
1540          * given above, and the number of traffic classes available for use.
1541          */
1542         if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1543                 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1544                 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1545  
1546                 /* VMDQ+DCB RX and TX configrations */
1547                 vmdq_rx_conf.enable_default_pool = 0;
1548                 vmdq_rx_conf.default_pool = 0;
1549                 vmdq_rx_conf.nb_queue_pools =
1550                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1551                 vmdq_tx_conf.nb_queue_pools =
1552                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1553  
1554                 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1555                 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1556                         vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1557                         vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1558                 }
1559                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1560                         vmdq_rx_conf.dcb_queue[i] = i;
1561                         vmdq_tx_conf.dcb_queue[i] = i;
1562                 }
1563  
1564                 /*set DCB mode of RX and TX of multiple queues*/
1565                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1566                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1567                 if (dcb_conf->pfc_en)
1568                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1569                 else
1570                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1571  
1572                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1573                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1574                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1575                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1576         }
1577         else {
1578                 struct rte_eth_dcb_rx_conf rx_conf;
1579                 struct rte_eth_dcb_tx_conf tx_conf;
1580  
1581                 /* queue mapping configuration of DCB RX and TX */
1582                 if (dcb_conf->num_tcs == ETH_4_TCS)
1583                         dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1584                 else
1585                         dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1586  
1587                 rx_conf.nb_tcs = dcb_conf->num_tcs;
1588                 tx_conf.nb_tcs = dcb_conf->num_tcs;
1589  
1590                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1591                         rx_conf.dcb_queue[i] = i;
1592                         tx_conf.dcb_queue[i] = i;
1593                 }
1594                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1595                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1596                 if (dcb_conf->pfc_en)
1597                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1598                 else
1599                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1600                  
1601                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1602                                 sizeof(struct rte_eth_dcb_rx_conf)));
1603                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1604                                 sizeof(struct rte_eth_dcb_tx_conf)));
1605         }
1606
1607         return 0;
1608 }
1609
1610 int
1611 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1612 {
1613         struct rte_eth_conf port_conf;
1614         struct rte_port *rte_port;
1615         int retval;
1616         uint16_t nb_vlan;
1617         uint16_t i;
1618  
1619         /* rxq and txq configuration in dcb mode */
1620         nb_rxq = 128;
1621         nb_txq = 128;
1622         rx_free_thresh = 64;
1623  
1624         memset(&port_conf,0,sizeof(struct rte_eth_conf));
1625         /* Enter DCB configuration status */
1626         dcb_config = 1;
1627  
1628         nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1629         /*set configuration of DCB in vt mode and DCB in non-vt mode*/
1630         retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1631         if (retval < 0)
1632                 return retval;
1633  
1634         rte_port = &ports[pid];
1635         memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1636  
1637         rte_port->rx_conf.rx_thresh = rx_thresh;
1638         rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1639         rte_port->tx_conf.tx_thresh = tx_thresh;
1640         rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1641         rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1642         /* VLAN filter */
1643         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1644         for (i = 0; i < nb_vlan; i++){
1645                 rx_vft_set(pid, vlan_tags[i], 1);
1646         }
1647  
1648         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1649         map_port_queue_stats_mapping_registers(pid, rte_port);
1650  
1651         return 0;
1652 }
1653
/*
 * NOTE(review): on bare-metal builds the application entry point is
 * renamed — presumably the bare-metal environment supplies its own
 * main() that calls _main(); confirm against the EAL bare-metal code.
 */
#ifdef RTE_EXEC_ENV_BAREMETAL
#define main _main
#endif
1657
1658 int
1659 main(int argc, char** argv)
1660 {
1661         int  diag;
1662         uint8_t port_id;
1663
1664         diag = rte_eal_init(argc, argv);
1665         if (diag < 0)
1666                 rte_panic("Cannot init EAL\n");
1667
1668         if (rte_pmd_init_all())
1669                 rte_panic("Cannot init PMD\n");
1670
1671         if (rte_eal_pci_probe())
1672                 rte_panic("Cannot probe PCI\n");
1673
1674         nb_ports = (portid_t) rte_eth_dev_count();
1675         if (nb_ports == 0)
1676                 rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1677                                                         "check that "
1678                           "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1679                           "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1680                           "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1681                           "configuration file\n");
1682
1683         set_def_fwd_config();
1684         if (nb_lcores == 0)
1685                 rte_panic("Empty set of forwarding logical cores - check the "
1686                           "core mask supplied in the command parameters\n");
1687
1688         argc -= diag;
1689         argv += diag;
1690         if (argc > 1)
1691                 launch_args_parse(argc, argv);
1692
1693         if (nb_rxq > nb_txq)
1694                 printf("Warning: nb_rxq=%d enables RSS configuration, "
1695                        "but nb_txq=%d will prevent to fully test it.\n",
1696                        nb_rxq, nb_txq);
1697
1698         init_config();
1699         start_port(RTE_PORT_ALL);
1700
1701         /* set all ports to promiscuous mode by default */
1702         for (port_id = 0; port_id < nb_ports; port_id++)
1703                 rte_eth_promiscuous_enable(port_id);
1704
1705         if (interactive == 1)
1706                 prompt();
1707         else {
1708                 char c;
1709                 int rc;
1710
1711                 printf("No commandline core given, start packet forwarding\n");
1712                 start_packet_forwarding(0);
1713                 printf("Press enter to exit\n");
1714                 rc = read(0, &c, 1);
1715                 if (rc < 0)
1716                         return 1;
1717         }
1718
1719         return 0;
1720 }