e1d765e74fc3b0fe2a5d445b08999e35b4d5588f
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  * 
4  *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  * 
7  *   Redistribution and use in source and binary forms, with or without 
8  *   modification, are permitted provided that the following conditions 
9  *   are met:
10  * 
11  *     * Redistributions of source code must retain the above copyright 
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright 
14  *       notice, this list of conditions and the following disclaimer in 
15  *       the documentation and/or other materials provided with the 
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its 
18  *       contributors may be used to endorse or promote products derived 
19  *       from this software without specific prior written permission.
20  * 
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  * 
33  */
34
35 #include <stdarg.h>
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <signal.h>
39 #include <string.h>
40 #include <time.h>
41 #include <fcntl.h>
42 #include <sys/types.h>
43 #include <errno.h>
44
45 #include <sys/queue.h>
46 #include <sys/stat.h>
47
48 #include <stdint.h>
49 #include <unistd.h>
50 #include <inttypes.h>
51
52 #include <rte_common.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_tailq.h>
62 #include <rte_eal.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_ring.h>
68 #include <rte_mempool.h>
69 #include <rte_malloc.h>
70 #include <rte_mbuf.h>
71 #include <rte_interrupts.h>
72 #include <rte_pci.h>
73 #include <rte_ether.h>
74 #include <rte_ethdev.h>
75 #include <rte_string_fns.h>
76
77 #include "testpmd.h"
78
79 uint16_t verbose_level = 0; /**< Silent by default. */
80
81 /* use master core for command line ? */
82 uint8_t interactive = 0;
83
84 /*
85  * NUMA support configuration.
86  * When set, the NUMA support attempts to dispatch the allocation of the
87  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
88  * probed ports among the CPU sockets 0 and 1.
89  * Otherwise, all memory is allocated from CPU socket 0.
90  */
91 uint8_t numa_support = 0; /**< No numa support by default */
92
93 /*
94  * Record the Ethernet address of peer target ports to which packets are
95  * forwarded.
96  * Must be instantiated with the Ethernet addresses of peer traffic generator
97  * ports.
98  */
99 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
100 portid_t nb_peer_eth_addrs = 0;
101
102 /*
103  * Probed Target Environment.
104  */
105 struct rte_port *ports;        /**< For all probed ethernet ports. */
106 portid_t nb_ports;             /**< Number of probed ethernet ports. */
107 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
108 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
109
110 /*
111  * Test Forwarding Configuration.
112  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
113  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
114  */
115 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
116 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
117 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
118 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
119
120 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
121 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
122
123 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
124 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
125
126 /*
127  * Forwarding engines.
128  */
129 struct fwd_engine * fwd_engines[] = {
130         &io_fwd_engine,
131         &mac_fwd_engine,
132         &rx_only_engine,
133         &tx_only_engine,
134         &csum_fwd_engine,
135 #ifdef RTE_LIBRTE_IEEE1588
136         &ieee1588_fwd_engine,
137 #endif
138         NULL,
139 };
140
141 struct fwd_config cur_fwd_config;
142 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
143
144 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
145 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
146                                       * specified on command-line. */
147
148 /*
149  * Configuration of packet segments used by the "txonly" processing engine.
150  */
151 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
152 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
153         TXONLY_DEF_PACKET_LEN,
154 };
155 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
156
157 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
158 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
159
160 /* current configuration is in DCB or not,0 means it is not in DCB mode */
161 uint8_t dcb_config = 0;
162  
163 /* Whether the dcb is in testing status */
164 uint8_t dcb_test = 0;
165  
166 /* DCB on and VT on mapping is default */
167 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
168
169 /*
170  * Configurable number of RX/TX queues.
171  */
172 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
173 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
174
175 /*
176  * Configurable number of RX/TX ring descriptors.
177  */
178 #define RTE_TEST_RX_DESC_DEFAULT 128
179 #define RTE_TEST_TX_DESC_DEFAULT 512
180 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
181 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
182
183 /*
184  * Configurable values of RX and TX ring threshold registers.
185  */
186 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
187 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
188 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
189
190 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
191 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
192 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
193
194 struct rte_eth_thresh rx_thresh = {
195         .pthresh = RX_PTHRESH,
196         .hthresh = RX_HTHRESH,
197         .wthresh = RX_WTHRESH,
198 };
199
200 struct rte_eth_thresh tx_thresh = {
201         .pthresh = TX_PTHRESH,
202         .hthresh = TX_HTHRESH,
203         .wthresh = TX_WTHRESH,
204 };
205
206 /*
207  * Configurable value of RX free threshold.
208  */
209 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
210
211 /*
212  * Configurable value of RX drop enable.
213  */
214 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
215
216 /*
217  * Configurable value of TX free threshold.
218  */
219 uint16_t tx_free_thresh = 0; /* Use default values. */
220
221 /*
222  * Configurable value of TX RS bit threshold.
223  */
224 uint16_t tx_rs_thresh = 0; /* Use default values. */
225
226 /*
227  * Configurable value of TX queue flags.
228  */
229 uint32_t txq_flags = 0; /* No flags set. */
230
231 /*
232  * Receive Side Scaling (RSS) configuration.
233  */
234 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
235
236 /*
237  * Port topology configuration
238  */
239 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
240
241 /*
242  * Ethernet device configuration.
243  */
244 struct rte_eth_rxmode rx_mode = {
245         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
246         .split_hdr_size = 0,
247         .header_split   = 0, /**< Header Split disabled. */
248         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
249         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
250         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
251         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
252         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
253         .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
254 };
255
256 struct rte_fdir_conf fdir_conf = {
257         .mode = RTE_FDIR_MODE_NONE,
258         .pballoc = RTE_FDIR_PBALLOC_64K,
259         .status = RTE_FDIR_REPORT_STATUS,
260         .flexbytes_offset = 0x6,
261         .drop_queue = 127,
262 };
263
264 static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
265
266 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
267 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
268
269 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
270 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
271
272 uint16_t nb_tx_queue_stats_mappings = 0;
273 uint16_t nb_rx_queue_stats_mappings = 0;
274
275 /* Forward function declarations */
276 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
277 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
278
279 /*
280  * Check if all the ports are started.
281  * If yes, return positive value. If not, return zero.
282  */
283 static int all_ports_started(void);
284
285 /*
286  * Setup default configuration.
287  */
288 static void
289 set_default_fwd_lcores_config(void)
290 {
291         unsigned int i;
292         unsigned int nb_lc;
293
294         nb_lc = 0;
295         for (i = 0; i < RTE_MAX_LCORE; i++) {
296                 if (! rte_lcore_is_enabled(i))
297                         continue;
298                 if (i == rte_get_master_lcore())
299                         continue;
300                 fwd_lcores_cpuids[nb_lc++] = i;
301         }
302         nb_lcores = (lcoreid_t) nb_lc;
303         nb_cfg_lcores = nb_lcores;
304         nb_fwd_lcores = 1;
305 }
306
307 static void
308 set_def_peer_eth_addrs(void)
309 {
310         portid_t i;
311
312         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
313                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
314                 peer_eth_addrs[i].addr_bytes[5] = i;
315         }
316 }
317
318 static void
319 set_default_fwd_ports_config(void)
320 {
321         portid_t pt_id;
322
323         for (pt_id = 0; pt_id < nb_ports; pt_id++)
324                 fwd_ports_ids[pt_id] = pt_id;
325
326         nb_cfg_ports = nb_ports;
327         nb_fwd_ports = nb_ports;
328 }
329
/*
 * Reset the whole forwarding configuration to its defaults:
 * lcore list, peer Ethernet addresses and port list.
 */
void
set_def_fwd_config(void)
{
        set_default_fwd_lcores_config();
        set_def_peer_eth_addrs();
        set_default_fwd_ports_config();
}
337
338 /*
339  * Configuration initialisation done once at init time.
340  */
/* Argument passed to the per-mbuf constructor (testpmd_mbuf_ctor). */
struct mbuf_ctor_arg {
        uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
        uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
};

/* Argument passed to the pool constructor (testpmd_mbuf_pool_ctor). */
struct mbuf_pool_ctor_arg {
        uint16_t seg_buf_size; /**< size of data segment in mbuf. */
};
349
350 static void
351 testpmd_mbuf_ctor(struct rte_mempool *mp,
352                   void *opaque_arg,
353                   void *raw_mbuf,
354                   __attribute__((unused)) unsigned i)
355 {
356         struct mbuf_ctor_arg *mb_ctor_arg;
357         struct rte_mbuf    *mb;
358
359         mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
360         mb = (struct rte_mbuf *) raw_mbuf;
361
362         mb->type         = RTE_MBUF_PKT;
363         mb->pool         = mp;
364         mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
365         mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
366                         mb_ctor_arg->seg_buf_offset);
367         mb->buf_len      = mb_ctor_arg->seg_buf_size;
368         mb->type         = RTE_MBUF_PKT;
369         mb->ol_flags     = 0;
370         mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
371         mb->pkt.nb_segs  = 1;
372         mb->pkt.vlan_macip.data = 0;
373         mb->pkt.hash.rss = 0;
374 }
375
376 static void
377 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
378                        void *opaque_arg)
379 {
380         struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
381         struct rte_pktmbuf_pool_private *mbp_priv;
382
383         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
384                 printf("%s(%s) private_data_size %d < %d\n",
385                        __func__, mp->name, (int) mp->private_data_size,
386                        (int) sizeof(struct rte_pktmbuf_pool_private));
387                 return;
388         }
389         mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
390         mbp_priv = (struct rte_pktmbuf_pool_private *)
391                 ((char *)mp + sizeof(struct rte_mempool));
392         mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
393 }
394
395 static void
396 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
397                  unsigned int socket_id)
398 {
399         char pool_name[RTE_MEMPOOL_NAMESIZE];
400         struct rte_mempool *rte_mp;
401         struct mbuf_pool_ctor_arg mbp_ctor_arg;
402         struct mbuf_ctor_arg mb_ctor_arg;
403         uint32_t mb_size;
404
405         mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
406                                                 mbuf_seg_size);
407         mb_ctor_arg.seg_buf_offset =
408                 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
409         mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
410         mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
411         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
412         rte_mp = rte_mempool_create(pool_name, nb_mbuf, (unsigned) mb_size,
413                                     (unsigned) mb_mempool_cache,
414                                     sizeof(struct rte_pktmbuf_pool_private),
415                                     testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
416                                     testpmd_mbuf_ctor, &mb_ctor_arg,
417                                     socket_id, 0);
418         if (rte_mp == NULL) {
419                 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
420                                                 "failed\n", socket_id);
421         }
422 }
423
/*
 * One-time run-time configuration:
 *  - allocate one fwd_lcore context per probed logical core,
 *  - create the mbuf pool(s) (one per CPU socket when NUMA is enabled),
 *  - bind each logical core to the mbuf pool of its socket,
 *  - allocate and flag-initialize the per-port contexts,
 *  - build the forwarding streams.
 * Exits the program on any allocation failure.
 */
static void
init_config(void)
{
        portid_t pid;
        struct rte_port *port;
        struct rte_mempool *mbp;
        unsigned int nb_mbuf_per_pool;
        lcoreid_t  lc_id;

        /* Configuration of logical cores. */
        fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
                                sizeof(struct fwd_lcore *) * nb_lcores,
                                CACHE_LINE_SIZE);
        if (fwd_lcores == NULL) {
                rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
                                                        "failed\n", nb_lcores);
        }
        for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
                fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
                                               sizeof(struct fwd_lcore),
                                               CACHE_LINE_SIZE);
                if (fwd_lcores[lc_id] == NULL) {
                        rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
                                                                "failed\n");
                }
                fwd_lcores[lc_id]->cpuid_idx = lc_id;
        }

        /*
         * Create pools of mbuf.
         * If NUMA support is disabled, create a single pool of mbuf in
         * socket 0 memory.
         * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
         *
         * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
         * nb_txd can be configured at run time.
         */
        if (param_total_num_mbufs)
                nb_mbuf_per_pool = param_total_num_mbufs;
        else {
                /* Worst-case budget per port: full RX+TX rings, one burst in
                 * flight, plus the per-lcore mempool caches. */
                nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
                                + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
                nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
        }
        if (numa_support) {
                /* Split the mbuf budget evenly between sockets 0 and 1. */
                nb_mbuf_per_pool /= 2;
                mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
                mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 1);
        } else {
                mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
        }

        /*
         * Records which Mbuf pool to use by each logical core, if needed.
         * Fall back to the socket-0 pool when no local pool exists.
         */
        for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
                mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
                if (mbp == NULL)
                        mbp = mbuf_pool_find(0);
                fwd_lcores[lc_id]->mbp = mbp;
        }

        /* Configuration of Ethernet ports. */
        ports = rte_zmalloc("testpmd: ports",
                            sizeof(struct rte_port) * nb_ports,
                            CACHE_LINE_SIZE);
        if (ports == NULL) {
                rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
                                                        "failed\n", nb_ports);
        }

        for (pid = 0; pid < nb_ports; pid++) {
                port = &ports[pid];
                rte_eth_dev_info_get(pid, &port->dev_info);

                /* set flag to initialize port/queue */
                port->need_reconfig = 1;
                port->need_reconfig_queues = 1;
        }

        init_port_config();

        /* Configuration of packet forwarding streams. */
        if (init_fwd_streams() < 0)
                rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
510
/*
 * (Re)build the array of forwarding streams: one stream per RX queue of
 * each port. Also assigns a NUMA socket to every port. Returns 0 on
 * success, -1 when the requested queue counts exceed what a port
 * supports; exits the program on allocation failure.
 */
int
init_fwd_streams(void)
{
        portid_t pid;
        struct rte_port *port;
        streamid_t sm_id, nb_fwd_streams_new;

        /* set socket id according to numa or not */
        for (pid = 0; pid < nb_ports; pid++) {
                port = &ports[pid];
                if (nb_rxq > port->dev_info.max_rx_queues) {
                        printf("Fail: nb_rxq(%d) is greater than "
                                "max_rx_queues(%d)\n", nb_rxq,
                                port->dev_info.max_rx_queues);
                        return -1;
                }
                if (nb_txq > port->dev_info.max_tx_queues) {
                        printf("Fail: nb_txq(%d) is greater than "
                                "max_tx_queues(%d)\n", nb_txq,
                                port->dev_info.max_tx_queues);
                        return -1;
                }
                /* First half of the ports on socket 0, second half on 1. */
                if (numa_support)
                        port->socket_id = (pid < (nb_ports >> 1)) ? 0 : 1;
                else
                        port->socket_id = 0;
        }

        nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
        /* Nothing to rebuild when the stream count is unchanged. */
        if (nb_fwd_streams_new == nb_fwd_streams)
                return 0;
        /* clear the old */
        if (fwd_streams != NULL) {
                for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
                        if (fwd_streams[sm_id] == NULL)
                                continue;
                        rte_free(fwd_streams[sm_id]);
                        fwd_streams[sm_id] = NULL;
                }
                rte_free(fwd_streams);
                fwd_streams = NULL;
        }

        /* init new */
        nb_fwd_streams = nb_fwd_streams_new;
        fwd_streams = rte_zmalloc("testpmd: fwd_streams",
                sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
        if (fwd_streams == NULL)
                rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
                                                "failed\n", nb_fwd_streams);

        for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
                fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
                                sizeof(struct fwd_stream), CACHE_LINE_SIZE);
                if (fwd_streams[sm_id] == NULL)
                        rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
                                                                " failed\n");
        }

        return 0;
}
572
573 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
574 static void
575 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
576 {
577         unsigned int total_burst;
578         unsigned int nb_burst;
579         unsigned int burst_stats[3];
580         uint16_t pktnb_stats[3];
581         uint16_t nb_pkt;
582         int burst_percent[3];
583
584         /*
585          * First compute the total number of packet bursts and the
586          * two highest numbers of bursts of the same number of packets.
587          */
588         total_burst = 0;
589         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
590         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
591         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
592                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
593                 if (nb_burst == 0)
594                         continue;
595                 total_burst += nb_burst;
596                 if (nb_burst > burst_stats[0]) {
597                         burst_stats[1] = burst_stats[0];
598                         pktnb_stats[1] = pktnb_stats[0];
599                         burst_stats[0] = nb_burst;
600                         pktnb_stats[0] = nb_pkt;
601                 }
602         }
603         if (total_burst == 0)
604                 return;
605         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
606         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
607                burst_percent[0], (int) pktnb_stats[0]);
608         if (burst_stats[0] == total_burst) {
609                 printf("]\n");
610                 return;
611         }
612         if (burst_stats[0] + burst_stats[1] == total_burst) {
613                 printf(" + %d%% of %d pkts]\n",
614                        100 - burst_percent[0], pktnb_stats[1]);
615                 return;
616         }
617         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
618         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
619         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
620                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
621                 return;
622         }
623         printf(" + %d%% of %d pkts + %d%% of others]\n",
624                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
625 }
626 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
627
/*
 * Print the forwarding statistics of one port: RX/TX packet, drop and
 * total counters, checksum-error counters when the csum engine is
 * active, optional per-burst and per-queue breakdowns, and flow
 * director counters when FDIR is enabled.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
        struct rte_port *port;
        uint8_t i;

        static const char *fwd_stats_border = "----------------------";

        port = &ports[port_id];
        printf("\n  %s Forward statistics for port %-2d %s\n",
               fwd_stats_border, port_id, fwd_stats_border);

        /* Two layouts: compact when no queue-stats mapping is active,
         * wide (aligned with the per-queue rows) otherwise. */
        if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
                printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
                       "%-"PRIu64"\n",
                       stats->ipackets, stats->ierrors,
                       (uint64_t) (stats->ipackets + stats->ierrors));

                if (cur_fwd_eng == &csum_fwd_engine)
                        printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
                               port->rx_bad_ip_csum, port->rx_bad_l4_csum);

                printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
                       "%-"PRIu64"\n",
                       stats->opackets, port->tx_dropped,
                       (uint64_t) (stats->opackets + port->tx_dropped));

                if (stats->rx_nombuf > 0)
                        printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);

        }
        else {
                printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
                       "%14"PRIu64"\n",
                       stats->ipackets, stats->ierrors,
                       (uint64_t) (stats->ipackets + stats->ierrors));

                if (cur_fwd_eng == &csum_fwd_engine)
                        printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
                               port->rx_bad_ip_csum, port->rx_bad_l4_csum);

                printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
                       "%14"PRIu64"\n",
                       stats->opackets, port->tx_dropped,
                       (uint64_t) (stats->opackets + port->tx_dropped));

                if (stats->rx_nombuf > 0)
                        printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
        }
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
        if (port->rx_stream)
                pkt_burst_stats_display("RX",
                        &port->rx_stream->rx_burst_stats);
        if (port->tx_stream)
                pkt_burst_stats_display("TX",
                        &port->tx_stream->tx_burst_stats);
#endif
        /* stats fdir */
        if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
                printf("  Fdirmiss:%14"PRIu64"    Fdirmatch:%14"PRIu64"\n",
                       stats->fdirmiss,
                       stats->fdirmatch);

        /* Per-queue counters, one row per stats register. */
        if (port->rx_queue_stats_mapping_enabled) {
                printf("\n");
                for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
                        printf("  Stats reg %2d RX-packets:%14"PRIu64
                               "     RX-errors:%14"PRIu64
                               "    RX-bytes:%14"PRIu64"\n",
                               i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
                }
                printf("\n");
        }
        if (port->tx_queue_stats_mapping_enabled) {
                for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
                        printf("  Stats reg %2d TX-packets:%14"PRIu64
                               "                                 TX-bytes:%14"PRIu64"\n",
                               i, stats->q_opackets[i], stats->q_obytes[i]);
                }
        }

        printf("  %s--------------------------------%s\n",
               fwd_stats_border, fwd_stats_border);
}
712
/*
 * Print the statistics of one forwarding stream (an RX queue -> TX
 * queue pair). Streams that saw no traffic at all are skipped.
 */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
        struct fwd_stream *fs;
        static const char *fwd_top_stats_border = "-------";

        fs = fwd_streams[stream_id];
        /* Nothing received, sent or dropped: nothing to report. */
        if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
            (fs->fwd_dropped == 0))
                return;
        printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
               "TX Port=%2d/Queue=%2d %s\n",
               fwd_top_stats_border, fs->rx_port, fs->rx_queue,
               fs->tx_port, fs->tx_queue, fwd_top_stats_border);
        printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
               fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

        /* if checksum mode */
        if (cur_fwd_eng == &csum_fwd_engine) {
               printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
                        "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
        }

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
        pkt_burst_stats_display("RX", &fs->rx_burst_stats);
        pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
741
742 static void
743 flush_all_rx_queues(void)
744 {
745         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
746         portid_t  rxp;
747         queueid_t rxq;
748         uint16_t  nb_rx;
749         uint16_t  i;
750         uint8_t   j;
751
752         for (j = 0; j < 2; j++) {
753                 for (rxp = 0; rxp < nb_ports; rxp++) {
754                         for (rxq = 0; rxq < nb_rxq; rxq++) {
755                                 do {
756                                         nb_rx = rte_eth_rx_burst(rxp, rxq,
757                                                 pkts_burst, MAX_PKT_BURST);
758                                         for (i = 0; i < nb_rx; i++)
759                                                 rte_pktmbuf_free(pkts_burst[i]);
760                                 } while (nb_rx > 0);
761                         }
762                 }
763                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
764         }
765 }
766
767 static void
768 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
769 {
770         struct fwd_stream **fsm;
771         streamid_t nb_fs;
772         streamid_t sm_id;
773
774         fsm = &fwd_streams[fc->stream_idx];
775         nb_fs = fc->stream_nb;
776         do {
777                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
778                         (*pkt_fwd)(fsm[sm_id]);
779         } while (! fc->stopped);
780 }
781
782 static int
783 start_pkt_forward_on_core(void *fwd_arg)
784 {
785         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
786                              cur_fwd_config.fwd_eng->packet_fwd);
787         return 0;
788 }
789
790 /*
791  * Run the TXONLY packet forwarding engine to send a single burst of packets.
792  * Used to start communication flows in network loopback test configurations.
793  */
794 static int
795 run_one_txonly_burst_on_core(void *fwd_arg)
796 {
797         struct fwd_lcore *fwd_lc;
798         struct fwd_lcore tmp_lcore;
799
800         fwd_lc = (struct fwd_lcore *) fwd_arg;
801         tmp_lcore = *fwd_lc;
802         tmp_lcore.stopped = 1;
803         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
804         return 0;
805 }
806
807 /*
808  * Launch packet forwarding:
809  *     - Setup per-port forwarding context.
810  *     - launch logical cores with their forwarding configuration.
811  */
812 static void
813 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
814 {
815         port_fwd_begin_t port_fwd_begin;
816         unsigned int i;
817         unsigned int lc_id;
818         int diag;
819
820         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
821         if (port_fwd_begin != NULL) {
822                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
823                         (*port_fwd_begin)(fwd_ports_ids[i]);
824         }
825         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
826                 lc_id = fwd_lcores_cpuids[i];
827                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
828                         fwd_lcores[i]->stopped = 0;
829                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
830                                                      fwd_lcores[i], lc_id);
831                         if (diag != 0)
832                                 printf("launch lcore %u failed - diag=%d\n",
833                                        lc_id, diag);
834                 }
835         }
836 }
837
838 /*
839  * Launch packet forwarding configuration.
840  */
void
start_packet_forwarding(int with_tx_first)
{
        port_fwd_begin_t port_fwd_begin;
        port_fwd_end_t  port_fwd_end;
        struct rte_port *port;
        unsigned int i;
        portid_t   pt_id;
        streamid_t sm_id;

        /* Forwarding can only run when every port has been started. */
        if (all_ports_started() == 0) {
                printf("Not all ports were started\n");
                return;
        }
        /* test_done != 0 means no forwarding session is currently active. */
        if (test_done == 0) {
                printf("Packet forwarding already started\n");
                return;
        }
        if((dcb_test) && (nb_fwd_lcores == 1)) {
                printf("In DCB mode,the nb forwarding cores should be larger than 1.\n");
                return;
        }
        test_done = 0;
        /* Drop any stale packets received while forwarding was stopped. */
        flush_all_rx_queues();
        fwd_config_setup();
        rxtx_config_display();

        /*
         * Snapshot current HW statistics into port->stats so the report
         * printed by stop_packet_forwarding() covers only this session.
         */
        for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
                pt_id = fwd_ports_ids[i];
                port = &ports[pt_id];
                rte_eth_stats_get(pt_id, &port->stats);
                port->tx_dropped = 0;

                map_port_queue_stats_mapping_registers(pt_id, port);
        }
        /* Reset per-stream software counters for the new session. */
        for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
                fwd_streams[sm_id]->rx_packets = 0;
                fwd_streams[sm_id]->tx_packets = 0;
                fwd_streams[sm_id]->fwd_dropped = 0;
                fwd_streams[sm_id]->rx_bad_ip_csum = 0;
                fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
                memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
                       sizeof(fwd_streams[sm_id]->rx_burst_stats));
                memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
                       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
                fwd_streams[sm_id]->core_cycles = 0;
#endif
        }
        /*
         * Optionally run a single TXONLY burst first to prime loopback
         * topologies, waiting for it to complete before the real engine
         * is launched.
         */
        if (with_tx_first) {
                port_fwd_begin = tx_only_engine.port_fwd_begin;
                if (port_fwd_begin != NULL) {
                        for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
                                (*port_fwd_begin)(fwd_ports_ids[i]);
                }
                launch_packet_forwarding(run_one_txonly_burst_on_core);
                rte_eal_mp_wait_lcore();
                port_fwd_end = tx_only_engine.port_fwd_end;
                if (port_fwd_end != NULL) {
                        for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
                                (*port_fwd_end)(fwd_ports_ids[i]);
                }
        }
        launch_packet_forwarding(start_pkt_forward_on_core);
}
909
910 void
911 stop_packet_forwarding(void)
912 {
913         struct rte_eth_stats stats;
914         struct rte_port *port;
915         port_fwd_end_t  port_fwd_end;
916         int i;
917         portid_t   pt_id;
918         streamid_t sm_id;
919         lcoreid_t  lc_id;
920         uint64_t total_recv;
921         uint64_t total_xmit;
922         uint64_t total_rx_dropped;
923         uint64_t total_tx_dropped;
924         uint64_t total_rx_nombuf;
925         uint64_t tx_dropped;
926         uint64_t rx_bad_ip_csum;
927         uint64_t rx_bad_l4_csum;
928 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
929         uint64_t fwd_cycles;
930 #endif
931         static const char *acc_stats_border = "+++++++++++++++";
932
933         if (all_ports_started() == 0) {
934                 printf("Not all ports were started\n");
935                 return;
936         }
937         if (test_done) {
938                 printf("Packet forwarding not started\n");
939                 return;
940         }
941         printf("Telling cores to stop...");
942         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
943                 fwd_lcores[lc_id]->stopped = 1;
944         printf("\nWaiting for lcores to finish...\n");
945         rte_eal_mp_wait_lcore();
946         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
947         if (port_fwd_end != NULL) {
948                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
949                         pt_id = fwd_ports_ids[i];
950                         (*port_fwd_end)(pt_id);
951                 }
952         }
953 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
954         fwd_cycles = 0;
955 #endif
956         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
957                 if (cur_fwd_config.nb_fwd_streams >
958                     cur_fwd_config.nb_fwd_ports) {
959                         fwd_stream_stats_display(sm_id);
960                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
961                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
962                 } else {
963                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
964                                 fwd_streams[sm_id];
965                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
966                                 fwd_streams[sm_id];
967                 }
968                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
969                 tx_dropped = (uint64_t) (tx_dropped +
970                                          fwd_streams[sm_id]->fwd_dropped);
971                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
972
973                 rx_bad_ip_csum =
974                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
975                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
976                                          fwd_streams[sm_id]->rx_bad_ip_csum);
977                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
978                                                         rx_bad_ip_csum;
979
980                 rx_bad_l4_csum =
981                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
982                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
983                                          fwd_streams[sm_id]->rx_bad_l4_csum);
984                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
985                                                         rx_bad_l4_csum;
986
987 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
988                 fwd_cycles = (uint64_t) (fwd_cycles +
989                                          fwd_streams[sm_id]->core_cycles);
990 #endif
991         }
992         total_recv = 0;
993         total_xmit = 0;
994         total_rx_dropped = 0;
995         total_tx_dropped = 0;
996         total_rx_nombuf  = 0;
997         for (i = 0; i < ((cur_fwd_config.nb_fwd_ports + 1) & ~0x1); i++) {
998                 pt_id = fwd_ports_ids[i];
999
1000                 port = &ports[pt_id];
1001                 rte_eth_stats_get(pt_id, &stats);
1002                 stats.ipackets -= port->stats.ipackets;
1003                 port->stats.ipackets = 0;
1004                 stats.opackets -= port->stats.opackets;
1005                 port->stats.opackets = 0;
1006                 stats.ibytes   -= port->stats.ibytes;
1007                 port->stats.ibytes = 0;
1008                 stats.obytes   -= port->stats.obytes;
1009                 port->stats.obytes = 0;
1010                 stats.ierrors  -= port->stats.ierrors;
1011                 port->stats.ierrors = 0;
1012                 stats.oerrors  -= port->stats.oerrors;
1013                 port->stats.oerrors = 0;
1014                 stats.rx_nombuf -= port->stats.rx_nombuf;
1015                 port->stats.rx_nombuf = 0;
1016                 stats.fdirmatch -= port->stats.fdirmatch;
1017                 port->stats.rx_nombuf = 0;
1018                 stats.fdirmiss -= port->stats.fdirmiss;
1019                 port->stats.rx_nombuf = 0;
1020
1021                 total_recv += stats.ipackets;
1022                 total_xmit += stats.opackets;
1023                 total_rx_dropped += stats.ierrors;
1024                 total_tx_dropped += port->tx_dropped;
1025                 total_rx_nombuf  += stats.rx_nombuf;
1026
1027                 fwd_port_stats_display(pt_id, &stats);
1028         }
1029         printf("\n  %s Accumulated forward statistics for all ports"
1030                "%s\n",
1031                acc_stats_border, acc_stats_border);
1032         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1033                "%-"PRIu64"\n"
1034                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1035                "%-"PRIu64"\n",
1036                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1037                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1038         if (total_rx_nombuf > 0)
1039                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1040         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1041                "%s\n",
1042                acc_stats_border, acc_stats_border);
1043 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1044         if (total_recv > 0)
1045                 printf("\n  CPU cycles/packet=%u (total cycles="
1046                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1047                        (unsigned int)(fwd_cycles / total_recv),
1048                        fwd_cycles, total_recv);
1049 #endif
1050         printf("\nDone.\n");
1051         test_done = 1;
1052 }
1053
1054 static int
1055 all_ports_started(void)
1056 {
1057         portid_t pi;
1058         struct rte_port *port;
1059
1060         for (pi = 0; pi < nb_ports; pi++) {
1061                 port = &ports[pi];
1062                 /* Check if there is a port which is not started */
1063                 if (port->port_status != RTE_PORT_STARTED)
1064                         return 0;
1065         }
1066
1067         /* No port is not started */
1068         return 1;
1069 }
1070
/*
 * Configure (if needed) and start port "pid", or every port when pid is
 * out of range (>= nb_ports). Port state transitions are guarded with
 * atomic compare-and-set so concurrent state changes are detected.
 */
void
start_port(portid_t pid)
{
        int diag, need_check_link_status = 0;
        portid_t pi;
        queueid_t qi;
        struct rte_port *port;

        if (test_done == 0) {
                printf("Please stop forwarding first\n");
                return;
        }

        if (init_fwd_streams() < 0) {
                printf("Fail from init_fwd_streams()\n");
                return;
        }

        if(dcb_config)
                dcb_test = 1;
        for (pi = 0; pi < nb_ports; pi++) {
                /* pid >= nb_ports means "start all ports". */
                if (pid < nb_ports && pid != pi)
                        continue;

                port = &ports[pi];
                /* Claim the port: STOPPED -> HANDLING, or skip it. */
                if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
                                                 RTE_PORT_HANDLING) == 0) {
                        printf("Port %d is now not stopped\n", pi);
                        continue;
                }

                if (port->need_reconfig > 0) {
                        port->need_reconfig = 0;

                        printf("Configuring Port %d\n", pi);
                        /* configure port */
                        diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
                                                &(port->dev_conf));
                        if (diag != 0) {
                                /* Release the port back to STOPPED. */
                                if (rte_atomic16_cmpset(&(port->port_status),
                                RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
                                        printf("Port %d can not be set back "
                                                        "to stopped\n", pi);
                                printf("Fail to configure port %d\n", pi);
                                /* try to reconfigure port next time */
                                port->need_reconfig = 1;
                                return;
                        }
                }

                if (port->need_reconfig_queues > 0) {
                        port->need_reconfig_queues = 0;

                        /* setup tx queues */
                        for (qi = 0; qi < nb_txq; qi++) {
                                diag = rte_eth_tx_queue_setup(pi, qi, nb_txd,
                                        port->socket_id, &(port->tx_conf));
                                if (diag == 0)
                                        continue;

                                /* Fail to setup tx queue, return */
                                if (rte_atomic16_cmpset(&(port->port_status),
                                                        RTE_PORT_HANDLING,
                                                        RTE_PORT_STOPPED) == 0)
                                        printf("Port %d can not be set back "
                                                        "to stopped\n", pi);
                                printf("Fail to configure port %d tx queues\n", pi);
                                /* try to reconfigure queues next time */
                                port->need_reconfig_queues = 1;
                                return;
                        }
                        /* setup rx queues */
                        for (qi = 0; qi < nb_rxq; qi++) {
                                /* Mempool is chosen by the port's NUMA socket. */
                                diag = rte_eth_rx_queue_setup(pi, qi, nb_rxd,
                                        port->socket_id, &(port->rx_conf),
                                        mbuf_pool_find(port->socket_id));
                                if (diag == 0)
                                        continue;

                                /* Fail to setup rx queue, return */
                                if (rte_atomic16_cmpset(&(port->port_status),
                                                        RTE_PORT_HANDLING,
                                                        RTE_PORT_STOPPED) == 0)
                                        printf("Port %d can not be set back "
                                                        "to stopped\n", pi);
                                printf("Fail to configure port %d rx queues\n", pi);
                                /* try to reconfigure queues next time */
                                port->need_reconfig_queues = 1;
                                return;
                        }
                }

                /* start port */
                if (rte_eth_dev_start(pi) < 0) {
                        printf("Fail to start port %d\n", pi);

                        /* Fail to setup rx queue, return */
                        if (rte_atomic16_cmpset(&(port->port_status),
                                RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
                                printf("Port %d can not be set back to "
                                                        "stopped\n", pi);
                        continue;
                }

                /* Commit the transition: HANDLING -> STARTED. */
                if (rte_atomic16_cmpset(&(port->port_status),
                        RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
                        printf("Port %d can not be set into started\n", pi);

                /* at least one port started, need checking link status */
                need_check_link_status = 1;
        }

        if (need_check_link_status)
                check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
        else
                printf("Please stop the ports first\n");

        printf("Done\n");
}
1190
1191 void
1192 stop_port(portid_t pid)
1193 {
1194         portid_t pi;
1195         struct rte_port *port;
1196         int need_check_link_status = 0;
1197
1198         if (test_done == 0) {
1199                 printf("Please stop forwarding first\n");
1200                 return;
1201         }
1202         if (dcb_test) {
1203                 dcb_test = 0;
1204                 dcb_config = 0;
1205         }
1206         printf("Stopping ports...\n");
1207
1208         for (pi = 0; pi < nb_ports; pi++) {
1209                 if (pid < nb_ports && pid != pi)
1210                         continue;
1211
1212                 port = &ports[pi];
1213                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1214                                                 RTE_PORT_HANDLING) == 0)
1215                         continue;
1216
1217                 rte_eth_dev_stop(pi);
1218
1219                 if (rte_atomic16_cmpset(&(port->port_status),
1220                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1221                         printf("Port %d can not be set into stopped\n", pi);
1222                 need_check_link_status = 1;
1223         }
1224         if (need_check_link_status)
1225                 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1226
1227         printf("Done\n");
1228 }
1229
1230 void
1231 close_port(portid_t pid)
1232 {
1233         portid_t pi;
1234         struct rte_port *port;
1235
1236         if (test_done == 0) {
1237                 printf("Please stop forwarding first\n");
1238                 return;
1239         }
1240
1241         printf("Closing ports...\n");
1242
1243         for (pi = 0; pi < nb_ports; pi++) {
1244                 if (pid < nb_ports && pid != pi)
1245                         continue;
1246
1247                 port = &ports[pi];
1248                 if (rte_atomic16_cmpset(&(port->port_status),
1249                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1250                         printf("Port %d is now not stopped\n", pi);
1251                         continue;
1252                 }
1253
1254                 rte_eth_dev_close(pi);
1255
1256                 if (rte_atomic16_cmpset(&(port->port_status),
1257                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1258                         printf("Port %d can not be set into stopped\n", pi);
1259         }
1260
1261         printf("Done\n");
1262 }
1263
1264 int
1265 all_ports_stopped(void)
1266 {
1267         portid_t pi;
1268         struct rte_port *port;
1269
1270         for (pi = 0; pi < nb_ports; pi++) {
1271                 port = &ports[pi];
1272                 if (port->port_status != RTE_PORT_STOPPED)
1273                         return 0;
1274         }
1275
1276         return 1;
1277 }
1278
1279 void
1280 pmd_test_exit(void)
1281 {
1282         portid_t pt_id;
1283
1284         for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1285                 printf("Stopping port %d...", pt_id);
1286                 fflush(stdout);
1287                 rte_eth_dev_close(pt_id);
1288                 printf("done\n");
1289         }
1290         printf("bye...\n");
1291 }
1292
/* Prototype of a test-menu command handler (no arguments, no result). */
typedef void (*cmd_func_t)(void);
/* Association of a command name with its handler function. */
struct pmd_test_command {
        const char *cmd_name;
        cmd_func_t cmd_func;
};

/* Number of entries in the pmd_test_menu command table. */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1300
1301 /* Check the link status of all ports in up to 9s, and print them finally */
1302 static void
1303 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1304 {
1305 #define CHECK_INTERVAL 100 /* 100ms */
1306 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1307         uint8_t portid, count, all_ports_up, print_flag = 0;
1308         struct rte_eth_link link;
1309
1310         printf("Checking link statuses...\n");
1311         fflush(stdout);
1312         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1313                 all_ports_up = 1;
1314                 for (portid = 0; portid < port_num; portid++) {
1315                         if ((port_mask & (1 << portid)) == 0)
1316                                 continue;
1317                         memset(&link, 0, sizeof(link));
1318                         rte_eth_link_get_nowait(portid, &link);
1319                         /* print link status if flag set */
1320                         if (print_flag == 1) {
1321                                 if (link.link_status)
1322                                         printf("Port %d Link Up - speed %u "
1323                                                 "Mbps - %s\n", (uint8_t)portid,
1324                                                 (unsigned)link.link_speed,
1325                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1326                                         ("full-duplex") : ("half-duplex\n"));
1327                                 else
1328                                         printf("Port %d Link Down\n",
1329                                                 (uint8_t)portid);
1330                                 continue;
1331                         }
1332                         /* clear all_ports_up flag if any link down */
1333                         if (link.link_status == 0) {
1334                                 all_ports_up = 0;
1335                                 break;
1336                         }
1337                 }
1338                 /* after finally printing all link status, get out */
1339                 if (print_flag == 1)
1340                         break;
1341
1342                 if (all_ports_up == 0) {
1343                         fflush(stdout);
1344                         rte_delay_ms(CHECK_INTERVAL);
1345                 }
1346
1347                 /* set the print_flag if all ports up or timeout */
1348                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1349                         print_flag = 1;
1350                 }
1351         }
1352 }
1353
1354 static int
1355 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1356 {
1357         uint16_t i;
1358         int diag;
1359         uint8_t mapping_found = 0;
1360
1361         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1362                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1363                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1364                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1365                                         tx_queue_stats_mappings[i].queue_id,
1366                                         tx_queue_stats_mappings[i].stats_counter_id);
1367                         if (diag != 0)
1368                                 return diag;
1369                         mapping_found = 1;
1370                 }
1371         }
1372         if (mapping_found)
1373                 port->tx_queue_stats_mapping_enabled = 1;
1374         return 0;
1375 }
1376
1377 static int
1378 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1379 {
1380         uint16_t i;
1381         int diag;
1382         uint8_t mapping_found = 0;
1383
1384         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1385                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1386                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1387                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1388                                         rx_queue_stats_mappings[i].queue_id,
1389                                         rx_queue_stats_mappings[i].stats_counter_id);
1390                         if (diag != 0)
1391                                 return diag;
1392                         mapping_found = 1;
1393                 }
1394         }
1395         if (mapping_found)
1396                 port->rx_queue_stats_mapping_enabled = 1;
1397         return 0;
1398 }
1399
1400 static void
1401 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1402 {
1403         int diag = 0;
1404
1405         diag = set_tx_queue_stats_mapping_registers(pi, port);
1406         if (diag != 0) {
1407                 if (diag == -ENOTSUP) {
1408                         port->tx_queue_stats_mapping_enabled = 0;
1409                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1410                 }
1411                 else
1412                         rte_exit(EXIT_FAILURE,
1413                                         "set_tx_queue_stats_mapping_registers "
1414                                         "failed for port id=%d diag=%d\n",
1415                                         pi, diag);
1416         }
1417
1418         diag = set_rx_queue_stats_mapping_registers(pi, port);
1419         if (diag != 0) {
1420                 if (diag == -ENOTSUP) {
1421                         port->rx_queue_stats_mapping_enabled = 0;
1422                         printf("RX queue stats mapping not supported port id=%d\n", pi);
1423                 }
1424                 else
1425                         rte_exit(EXIT_FAILURE,
1426                                         "set_rx_queue_stats_mapping_registers "
1427                                         "failed for port id=%d diag=%d\n",
1428                                         pi, diag);
1429         }
1430 }
1431
1432 void
1433 init_port_config(void)
1434 {
1435         portid_t pid;
1436         struct rte_port *port;
1437
1438         for (pid = 0; pid < nb_ports; pid++) {
1439                 port = &ports[pid];
1440                 port->dev_conf.rxmode = rx_mode;
1441                 port->dev_conf.fdir_conf = fdir_conf;
1442                 if (nb_rxq > 0) {
1443                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1444                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1445                 } else {
1446                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1447                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1448                 }
1449                 port->rx_conf.rx_thresh = rx_thresh;
1450                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1451                 port->rx_conf.rx_drop_en = rx_drop_en;
1452                 port->tx_conf.tx_thresh = tx_thresh;
1453                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1454                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1455                 port->tx_conf.txq_flags = txq_flags;
1456
1457                 rte_eth_macaddr_get(pid, &port->eth_addr);
1458
1459                 map_port_queue_stats_mapping_registers(pid, port);
1460         }
1461 }
1462
/* VLAN identifiers installed in the VLAN filter table and mapped to
 * VMDQ pools when running in DCB/VT mode (see get_eth_dcb_conf()). */
const uint16_t vlan_tags[] = {
                0,  1,  2,  3,  4,  5,  6,  7,
                8,  9, 10, 11,  12, 13, 14, 15,
                16, 17, 18, 19, 20, 21, 22, 23,
                24, 25, 26, 27, 28, 29, 30, 31
};
1469
1470 static  int
1471 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1472 {
1473         uint8_t i;
1474  
1475         /*
1476          * Builds up the correct configuration for dcb+vt based on the vlan tags array
1477          * given above, and the number of traffic classes available for use.
1478          */
1479         if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1480                 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1481                 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1482  
1483                 /* VMDQ+DCB RX and TX configrations */
1484                 vmdq_rx_conf.enable_default_pool = 0;
1485                 vmdq_rx_conf.default_pool = 0;
1486                 vmdq_rx_conf.nb_queue_pools =
1487                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1488                 vmdq_tx_conf.nb_queue_pools =
1489                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1490  
1491                 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1492                 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1493                         vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1494                         vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1495                 }
1496                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1497                         vmdq_rx_conf.dcb_queue[i] = i;
1498                         vmdq_tx_conf.dcb_queue[i] = i;
1499                 }
1500  
1501                 /*set DCB mode of RX and TX of multiple queues*/
1502                 eth_conf->rxmode.mq_mode = ETH_VMDQ_DCB;
1503                 eth_conf->txmode.mq_mode = ETH_VMDQ_DCB_TX;
1504                 if (dcb_conf->pfc_en)
1505                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1506                 else
1507                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1508  
1509                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1510                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1511                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1512                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1513         }
1514         else {
1515                 struct rte_eth_dcb_rx_conf rx_conf;
1516                 struct rte_eth_dcb_tx_conf tx_conf;
1517  
1518                 /* queue mapping configuration of DCB RX and TX */
1519                 if (dcb_conf->num_tcs == ETH_4_TCS)
1520                         dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1521                 else
1522                         dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1523  
1524                 rx_conf.nb_tcs = dcb_conf->num_tcs;
1525                 tx_conf.nb_tcs = dcb_conf->num_tcs;
1526  
1527                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1528                         rx_conf.dcb_queue[i] = i;
1529                         tx_conf.dcb_queue[i] = i;
1530                 }
1531                 eth_conf->rxmode.mq_mode = ETH_DCB_RX;
1532                 eth_conf->txmode.mq_mode = ETH_DCB_TX;
1533                 if (dcb_conf->pfc_en)
1534                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1535                 else
1536                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1537                  
1538                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1539                                 sizeof(struct rte_eth_dcb_rx_conf)));
1540                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1541                                 sizeof(struct rte_eth_dcb_tx_conf)));
1542         }
1543
1544         return 0;
1545 }
1546
1547 int
1548 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1549 {
1550         struct rte_eth_conf port_conf;
1551         struct rte_port *rte_port;
1552         int retval;
1553         uint16_t nb_vlan;
1554         uint16_t i;
1555  
1556         /* rxq and txq configuration in dcb mode */
1557         nb_rxq = 128;
1558         nb_txq = 128;
1559         rx_free_thresh = 64;
1560  
1561         memset(&port_conf,0,sizeof(struct rte_eth_conf));
1562         /* Enter DCB configuration status */
1563         dcb_config = 1;
1564  
1565         nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1566         /*set configuration of DCB in vt mode and DCB in non-vt mode*/
1567         retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1568         if (retval < 0)
1569                 return retval;
1570  
1571         rte_port = &ports[pid];
1572         memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1573  
1574         rte_port->rx_conf.rx_thresh = rx_thresh;
1575         rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1576         rte_port->tx_conf.tx_thresh = tx_thresh;
1577         rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1578         rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1579         /* VLAN filter */
1580         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1581         for (i = 0; i < nb_vlan; i++){
1582                 rx_vft_set(pid, vlan_tags[i], 1);
1583         }
1584  
1585         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1586         map_port_queue_stats_mapping_registers(pid, rte_port);
1587  
1588         return 0;
1589 }
1590
1591 #ifdef RTE_EXEC_ENV_BAREMETAL
1592 #define main _main
1593 #endif
1594
1595 int
1596 main(int argc, char** argv)
1597 {
1598         int  diag;
1599         uint8_t port_id;
1600
1601         diag = rte_eal_init(argc, argv);
1602         if (diag < 0)
1603                 rte_panic("Cannot init EAL\n");
1604
1605         if (rte_pmd_init_all())
1606                 rte_panic("Cannot init PMD\n");
1607
1608         if (rte_eal_pci_probe())
1609                 rte_panic("Cannot probe PCI\n");
1610
1611         nb_ports = (portid_t) rte_eth_dev_count();
1612         if (nb_ports == 0)
1613                 rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1614                                                         "check that "
1615                           "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1616                           "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1617                           "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1618                           "configuration file\n");
1619
1620         set_def_fwd_config();
1621         if (nb_lcores == 0)
1622                 rte_panic("Empty set of forwarding logical cores - check the "
1623                           "core mask supplied in the command parameters\n");
1624
1625         argc -= diag;
1626         argv += diag;
1627         if (argc > 1)
1628                 launch_args_parse(argc, argv);
1629
1630         if (nb_rxq > nb_txq)
1631                 printf("Warning: nb_rxq=%d enables RSS configuration, "
1632                        "but nb_txq=%d will prevent to fully test it.\n",
1633                        nb_rxq, nb_txq);
1634
1635         init_config();
1636         start_port(RTE_PORT_ALL);
1637
1638         /* set all ports to promiscuous mode by default */
1639         for (port_id = 0; port_id < nb_ports; port_id++)
1640                 rte_eth_promiscuous_enable(port_id);
1641
1642         if (interactive == 1)
1643                 prompt();
1644         else {
1645                 char c;
1646                 int rc;
1647
1648                 printf("No commandline core given, start packet forwarding\n");
1649                 start_packet_forwarding(0);
1650                 printf("Press enter to exit\n");
1651                 rc = read(0, &c, 1);
1652                 if (rc < 0)
1653                         return 1;
1654         }
1655
1656         return 0;
1657 }