10e4347f6a21a9fc65f493fd55c97c11b7bbaf2b
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_eal.h>
61 #include <rte_per_lcore.h>
62 #include <rte_lcore.h>
63 #include <rte_atomic.h>
64 #include <rte_branch_prediction.h>
65 #include <rte_ring.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
68 #include <rte_mbuf.h>
69 #include <rte_interrupts.h>
70 #include <rte_pci.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
73 #include <rte_dev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81
82 uint16_t verbose_level = 0; /**< Silent by default. */
83
84 /* Use the master core for the command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
87
88 /*
89  * NUMA support configuration.
90  * When enabled, NUMA support dispatches the allocation of the RX and TX
91  * memory rings and of the DMA memory buffers (mbufs) of the probed ports
92  * among CPU sockets 0 and 1.
93  * Otherwise, all memory is allocated from CPU socket 0.
94  */
95 uint8_t numa_support = 0; /**< No numa support by default */
96
97 /*
98  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99  * not configured.
100  */
101 uint8_t socket_num = UMA_NO_CONFIG;
102
103 /*
104  * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
105  */
106 uint8_t mp_anon = 0;
107
108 /*
109  * Record the Ethernet address of peer target ports to which packets are
110  * forwarded.
111  * Must be instantiated with the Ethernet addresses of the peer traffic
112  * generator ports.
113  */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
116
117 /*
118  * Probed Target Environment.
119  */
120 struct rte_port *ports;        /**< For all probed ethernet ports. */
121 portid_t nb_ports;             /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124
125 /*
126  * Test Forwarding Configuration.
127  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129  */
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
134
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
140
141 /*
142  * Forwarding engines.
143  */
144 struct fwd_engine * fwd_engines[] = {
145         &io_fwd_engine,
146         &mac_fwd_engine,
147         &mac_retry_fwd_engine,
148         &mac_swap_engine,
149         &flow_gen_engine,
150         &rx_only_engine,
151         &tx_only_engine,
152         &csum_fwd_engine,
153         &icmp_echo_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155         &ieee1588_fwd_engine,
156 #endif
157         NULL,
158 };
159
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
162
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
165                                       * specified on command-line. */
166
167 /*
168  * Configuration of packet segments used by the "txonly" processing engine.
169  */
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172         TXONLY_DEF_PACKET_LEN,
173 };
174 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
175
176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
177 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
178
179 /* Whether the current configuration is in DCB mode; 0 means it is not. */
180 uint8_t dcb_config = 0;
181
182 /* Whether DCB is currently being tested. */
183 uint8_t dcb_test = 0;
184
185 /* DCB with VT enabled is the default queue mapping mode. */
186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
187
188 /*
189  * Configurable number of RX/TX queues.
190  */
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193
194 /*
195  * Configurable number of RX/TX ring descriptors.
196  */
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
201
202 #define RTE_PMD_PARAM_UNSET -1
203 /*
204  * Configurable values of RX and TX ring threshold registers.
205  */
206
207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
210
211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
214
215 /*
216  * Configurable value of RX free threshold.
217  */
218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
219
220 /*
221  * Configurable value of RX drop enable.
222  */
223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
224
225 /*
226  * Configurable value of TX free threshold.
227  */
228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
229
230 /*
231  * Configurable value of TX RS bit threshold.
232  */
233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
234
235 /*
236  * Configurable value of TX queue flags.
237  */
238 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
239
240 /*
241  * Receive Side Scaling (RSS) configuration.
242  */
243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
244
245 /*
246  * Port topology configuration
247  */
248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
249
250 /*
251  * Avoid flushing all the RX streams before starting forwarding.
252  */
253 uint8_t no_flush_rx = 0; /* flush by default */
254
255 /*
256  * Avoid checking the link status when starting/stopping a port.
257  */
258 uint8_t no_link_check = 0; /* check by default */
259
260 /*
261  * NIC bypass mode configuration options.
262  */
263 #ifdef RTE_NIC_BYPASS
264
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
267
268 #endif
269
270 /*
271  * Ethernet device configuration.
272  */
273 struct rte_eth_rxmode rx_mode = {
274         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
275         .split_hdr_size = 0,
276         .header_split   = 0, /**< Header Split disabled. */
277         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
280         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
282         .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
283 };
284
285 struct rte_fdir_conf fdir_conf = {
286         .mode = RTE_FDIR_MODE_NONE,
287         .pballoc = RTE_FDIR_PBALLOC_64K,
288         .status = RTE_FDIR_REPORT_STATUS,
289         .mask = {
290                 .vlan_tci_mask = 0x0,
291                 .ipv4_mask     = {
292                         .src_ip = 0xFFFFFFFF,
293                         .dst_ip = 0xFFFFFFFF,
294                 },
295                 .ipv6_mask     = {
296                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
297                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
298                 },
299                 .src_port_mask = 0xFFFF,
300                 .dst_port_mask = 0xFFFF,
301         },
302         .drop_queue = 127,
303 };
304
305 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
306
307 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
308 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
309
310 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
311 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
312
313 uint16_t nb_tx_queue_stats_mappings = 0;
314 uint16_t nb_rx_queue_stats_mappings = 0;
315
316 /* Forward function declarations */
317 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
318 static void check_all_ports_link_status(uint32_t port_mask);
319
320 /*
321  * Check if all the ports are started.
322  * If yes, return positive value. If not, return zero.
323  */
324 static int all_ports_started(void);
325
326 /*
327  * Find next enabled port
328  */
329 portid_t
330 find_next_port(portid_t p, struct rte_port *ports, int size)
331 {
332         if (ports == NULL)
333                 rte_exit(-EINVAL, "failed to find a next port id\n");
334
335         while ((p < size) && (ports[p].enabled == 0))
336                 p++;
337         return p;
338 }
339
340 /*
341  * Setup default configuration.
342  */
343 static void
344 set_default_fwd_lcores_config(void)
345 {
346         unsigned int i;
347         unsigned int nb_lc;
348
349         nb_lc = 0;
350         for (i = 0; i < RTE_MAX_LCORE; i++) {
351                 if (! rte_lcore_is_enabled(i))
352                         continue;
353                 if (i == rte_get_master_lcore())
354                         continue;
355                 fwd_lcores_cpuids[nb_lc++] = i;
356         }
357         nb_lcores = (lcoreid_t) nb_lc;
358         nb_cfg_lcores = nb_lcores;
359         nb_fwd_lcores = 1;
360 }
361
362 static void
363 set_def_peer_eth_addrs(void)
364 {
365         portid_t i;
366
367         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
368                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
369                 peer_eth_addrs[i].addr_bytes[5] = i;
370         }
371 }
372
373 static void
374 set_default_fwd_ports_config(void)
375 {
376         portid_t pt_id;
377
378         for (pt_id = 0; pt_id < nb_ports; pt_id++)
379                 fwd_ports_ids[pt_id] = pt_id;
380
381         nb_cfg_ports = nb_ports;
382         nb_fwd_ports = nb_ports;
383 }
384
385 void
386 set_def_fwd_config(void)
387 {
388         set_default_fwd_lcores_config();
389         set_def_peer_eth_addrs();
390         set_default_fwd_ports_config();
391 }
392
393 /*
394  * Configuration initialisation done once at init time.
395  */
396 struct mbuf_ctor_arg {
397         uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
398         uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
399 };
400
401 struct mbuf_pool_ctor_arg {
402         uint16_t seg_buf_size; /**< size of data segment in mbuf. */
403 };
404
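/*
 * Per-mbuf constructor, called once for each element when a pool is
 * populated: it points buf_addr/buf_physaddr just after the rte_mbuf
 * header and resets the packet metadata fields.
 */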
405 static void
406 testpmd_mbuf_ctor(struct rte_mempool *mp,
407                   void *opaque_arg,
408                   void *raw_mbuf,
409                   __attribute__((unused)) unsigned i)
410 {
411         struct mbuf_ctor_arg *mb_ctor_arg;
412         struct rte_mbuf    *mb;
413
414         mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
415         mb = (struct rte_mbuf *) raw_mbuf;
416
417         mb->pool         = mp;
418         mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
419         mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
420                         mb_ctor_arg->seg_buf_offset);
421         mb->buf_len      = mb_ctor_arg->seg_buf_size;
422         mb->ol_flags     = 0;
423         mb->data_off     = RTE_PKTMBUF_HEADROOM;
424         mb->nb_segs      = 1;
425         mb->tx_offload   = 0;
426         mb->vlan_tci     = 0;
427         mb->hash.rss     = 0;
428 }
429
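/*
 * Pool constructor: record the per-mbuf data room size in the pool
 * private area so that it can be retrieved later (e.g. by PMDs).
 */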
430 static void
431 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
432                        void *opaque_arg)
433 {
434         struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
435         struct rte_pktmbuf_pool_private *mbp_priv;
436
437         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
438                 printf("%s(%s) private_data_size %d < %d\n",
439                        __func__, mp->name, (int) mp->private_data_size,
440                        (int) sizeof(struct rte_pktmbuf_pool_private));
441                 return;
442         }
443         mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
444         mbp_priv = rte_mempool_get_priv(mp);
445         mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
446         mbp_priv->mbuf_priv_size = 0;
447 }
448
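/*
 * Create a pool of nb_mbuf mbufs on the given socket. Each element holds
 * the rte_mbuf header (rounded up to a cache line) followed by the data
 * buffer (headroom + mbuf_seg_size bytes).
 */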
449 static void
450 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
451                  unsigned int socket_id)
452 {
453         char pool_name[RTE_MEMPOOL_NAMESIZE];
454         struct rte_mempool *rte_mp;
455         struct mbuf_pool_ctor_arg mbp_ctor_arg;
456         struct mbuf_ctor_arg mb_ctor_arg;
457         uint32_t mb_size;
458
459         mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
460                                                 mbuf_seg_size);
461         mb_ctor_arg.seg_buf_offset =
462                 (uint16_t) RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
463         mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
464         mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
465         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
466
467 #ifdef RTE_LIBRTE_PMD_XENVIRT
468         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
469                                    (unsigned) mb_mempool_cache,
470                                    sizeof(struct rte_pktmbuf_pool_private),
471                                    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
472                                    testpmd_mbuf_ctor, &mb_ctor_arg,
473                                    socket_id, 0);
474
475
476
477 #else
478         if (mp_anon != 0)
479                 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
480                                     (unsigned) mb_mempool_cache,
481                                     sizeof(struct rte_pktmbuf_pool_private),
482                                     testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
483                                     testpmd_mbuf_ctor, &mb_ctor_arg,
484                                     socket_id, 0);
485         else
486                 rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
487                                     (unsigned) mb_mempool_cache,
488                                     sizeof(struct rte_pktmbuf_pool_private),
489                                     testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
490                                     testpmd_mbuf_ctor, &mb_ctor_arg,
491                                     socket_id, 0);
492
493 #endif
494
495         if (rte_mp == NULL) {
496                 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
497                                                 "failed\n", socket_id);
498         } else if (verbose_level > 0) {
499                 rte_mempool_dump(stdout, rte_mp);
500         }
501 }
502
503 /*
504  * Check whether the given socket id is valid in NUMA mode.
505  * Return 0 if valid, otherwise return -1.
506  */
507 static int
508 check_socket_id(const unsigned int socket_id)
509 {
510         static int warning_once = 0;
511
512         if (socket_id >= MAX_SOCKET) {
513                 if (!warning_once && numa_support)
514                         printf("Warning: NUMA should be configured manually by"
515                                " using --port-numa-config and"
516                                " --ring-numa-config parameters along with"
517                                " --numa.\n");
518                 warning_once = 1;
519                 return -1;
520         }
521         return 0;
522 }
523
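/*
 * One-time initialization of the testpmd configuration: allocate the
 * per-lcore forwarding contexts, create the mbuf pool(s), apply the
 * default port configuration and set up the forwarding streams.
 */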
524 static void
525 init_config(void)
526 {
527         portid_t pid;
528         struct rte_port *port;
529         struct rte_mempool *mbp;
530         unsigned int nb_mbuf_per_pool;
531         lcoreid_t  lc_id;
532         uint8_t port_per_socket[MAX_SOCKET];
533
534         memset(port_per_socket,0,MAX_SOCKET);
535         /* Configuration of logical cores. */
536         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
537                                 sizeof(struct fwd_lcore *) * nb_lcores,
538                                 RTE_CACHE_LINE_SIZE);
539         if (fwd_lcores == NULL) {
540                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
541                                                         "failed\n", nb_lcores);
542         }
543         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
544                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
545                                                sizeof(struct fwd_lcore),
546                                                RTE_CACHE_LINE_SIZE);
547                 if (fwd_lcores[lc_id] == NULL) {
548                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
549                                                                 "failed\n");
550                 }
551                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
552         }
553
554         /*
555          * Create pools of mbuf.
556          * If NUMA support is disabled, create a single pool of mbuf in
557          * socket 0 memory by default.
558          * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
559          *
560          * Size the pools for the maximum RX/TX descriptor counts, so that
561          * nb_rxd and nb_txd can be reconfigured at run time.
562          */
563         if (param_total_num_mbufs)
564                 nb_mbuf_per_pool = param_total_num_mbufs;
565         else {
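                /*
                 * Default sizing heuristic: enough mbufs for a full RX ring,
                 * a full TX ring, the per-lcore mempool caches and one burst
                 * in flight.
                 */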
566                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
567                                 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
568
569                 if (!numa_support)
570                         nb_mbuf_per_pool =
571                                 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
572         }
573
574         if (!numa_support) {
575                 if (socket_num == UMA_NO_CONFIG)
576                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
577                 else
578                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
579                                                  socket_num);
580         }
581
582         FOREACH_PORT(pid, ports) {
583                 port = &ports[pid];
584                 rte_eth_dev_info_get(pid, &port->dev_info);
585
586                 if (numa_support) {
587                         if (port_numa[pid] != NUMA_NO_CONFIG)
588                                 port_per_socket[port_numa[pid]]++;
589                         else {
590                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
591
592                                 /* if socket_id is invalid, set to 0 */
593                                 if (check_socket_id(socket_id) < 0)
594                                         socket_id = 0;
595                                 port_per_socket[socket_id]++;
596                         }
597                 }
598
599                 /* set flag to initialize port/queue */
600                 port->need_reconfig = 1;
601                 port->need_reconfig_queues = 1;
602         }
603
604         if (numa_support) {
605                 uint8_t i;
606                 unsigned int nb_mbuf;
607
608                 if (param_total_num_mbufs)
609                         nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
610
611                 for (i = 0; i < MAX_SOCKET; i++) {
612                         nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
613                         if (nb_mbuf)
614                                 mbuf_pool_create(mbuf_data_size,
615                                                 nb_mbuf,i);
616                 }
617         }
618         init_port_config();
619
620         /*
621          * Record which mbuf pool each logical core should use, if needed.
622          */
623         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
624                 mbp = mbuf_pool_find(
625                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
626
627                 if (mbp == NULL)
628                         mbp = mbuf_pool_find(0);
629                 fwd_lcores[lc_id]->mbp = mbp;
630         }
631
632         /* Configuration of packet forwarding streams. */
633         if (init_fwd_streams() < 0)
634                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
635 }
636
637
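/*
 * Reset the configuration state of a single port so that it is fully
 * reconfigured (device and queues) the next time it is started.
 */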
638 void
639 reconfig(portid_t new_port_id, unsigned socket_id)
640 {
641         struct rte_port *port;
642
643         /* Reconfiguration of Ethernet ports. */
644         port = &ports[new_port_id];
645         rte_eth_dev_info_get(new_port_id, &port->dev_info);
646
647         /* set flag to initialize port/queue */
648         port->need_reconfig = 1;
649         port->need_reconfig_queues = 1;
650         port->socket_id = socket_id;
651
652         init_port_config();
653 }
654
655
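/*
 * (Re)allocate one forwarding stream per (port, RX queue) pair and set the
 * NUMA socket of each port. Return 0 on success, -1 if nb_rxq or nb_txq
 * exceeds the limits reported by the device.
 */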
656 int
657 init_fwd_streams(void)
658 {
659         portid_t pid;
660         struct rte_port *port;
661         streamid_t sm_id, nb_fwd_streams_new;
662
663         /* set the socket id of each port according to the NUMA configuration */
664         FOREACH_PORT(pid, ports) {
665                 port = &ports[pid];
666                 if (nb_rxq > port->dev_info.max_rx_queues) {
667                         printf("Fail: nb_rxq(%d) is greater than "
668                                 "max_rx_queues(%d)\n", nb_rxq,
669                                 port->dev_info.max_rx_queues);
670                         return -1;
671                 }
672                 if (nb_txq > port->dev_info.max_tx_queues) {
673                         printf("Fail: nb_txq(%d) is greater than "
674                                 "max_tx_queues(%d)\n", nb_txq,
675                                 port->dev_info.max_tx_queues);
676                         return -1;
677                 }
678                 if (numa_support) {
679                         if (port_numa[pid] != NUMA_NO_CONFIG)
680                                 port->socket_id = port_numa[pid];
681                         else {
682                                 port->socket_id = rte_eth_dev_socket_id(pid);
683
684                                 /* if socket_id is invalid, set to 0 */
685                                 if (check_socket_id(port->socket_id) < 0)
686                                         port->socket_id = 0;
687                         }
688                 }
689                 else {
690                         if (socket_num == UMA_NO_CONFIG)
691                                 port->socket_id = 0;
692                         else
693                                 port->socket_id = socket_num;
694                 }
695         }
696
697         nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
698         if (nb_fwd_streams_new == nb_fwd_streams)
699                 return 0;
700         /* clear the old */
701         if (fwd_streams != NULL) {
702                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
703                         if (fwd_streams[sm_id] == NULL)
704                                 continue;
705                         rte_free(fwd_streams[sm_id]);
706                         fwd_streams[sm_id] = NULL;
707                 }
708                 rte_free(fwd_streams);
709                 fwd_streams = NULL;
710         }
711
712         /* init new */
713         nb_fwd_streams = nb_fwd_streams_new;
714         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
715                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
716         if (fwd_streams == NULL)
717                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
718                                                 "failed\n", nb_fwd_streams);
719
720         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
721                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
722                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
723                 if (fwd_streams[sm_id] == NULL)
724                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
725                                                                 " failed\n");
726         }
727
728         return 0;
729 }
730
731 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
732 static void
733 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
734 {
735         unsigned int total_burst;
736         unsigned int nb_burst;
737         unsigned int burst_stats[3];
738         uint16_t pktnb_stats[3];
739         uint16_t nb_pkt;
740         int burst_percent[3];
741
742         /*
743          * First compute the total number of packet bursts and the
744          * two highest numbers of bursts of the same number of packets.
745          */
746         total_burst = 0;
747         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
748         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
749         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
750                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
751                 if (nb_burst == 0)
752                         continue;
753                 total_burst += nb_burst;
754                 if (nb_burst > burst_stats[0]) {
755                         burst_stats[1] = burst_stats[0];
756                         pktnb_stats[1] = pktnb_stats[0];
757                         burst_stats[0] = nb_burst;
758                         pktnb_stats[0] = nb_pkt;
759                 }
760         }
761         if (total_burst == 0)
762                 return;
763         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
764         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
765                burst_percent[0], (int) pktnb_stats[0]);
766         if (burst_stats[0] == total_burst) {
767                 printf("]\n");
768                 return;
769         }
770         if (burst_stats[0] + burst_stats[1] == total_burst) {
771                 printf(" + %d%% of %d pkts]\n",
772                        100 - burst_percent[0], pktnb_stats[1]);
773                 return;
774         }
775         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
776         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
777         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
778                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
779                 return;
780         }
781         printf(" + %d%% of %d pkts + %d%% of others]\n",
782                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
783 }
784 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
785
786 static void
787 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
788 {
789         struct rte_port *port;
790         uint8_t i;
791
792         static const char *fwd_stats_border = "----------------------";
793
794         port = &ports[port_id];
795         printf("\n  %s Forward statistics for port %-2d %s\n",
796                fwd_stats_border, port_id, fwd_stats_border);
797
798         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
799                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
800                        "%-"PRIu64"\n",
801                        stats->ipackets, stats->imissed,
802                        (uint64_t) (stats->ipackets + stats->imissed));
803
804                 if (cur_fwd_eng == &csum_fwd_engine)
805                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
806                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
807                 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
808                         printf("  RX-badcrc:  %-14"PRIu64" RX-badlen:  %-14"PRIu64
809                                "RX-error: %-"PRIu64"\n",
810                                stats->ibadcrc, stats->ibadlen, stats->ierrors);
811                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
812                 }
813
814                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
815                        "%-"PRIu64"\n",
816                        stats->opackets, port->tx_dropped,
817                        (uint64_t) (stats->opackets + port->tx_dropped));
818         }
819         else {
820                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
821                        "%14"PRIu64"\n",
822                        stats->ipackets, stats->imissed,
823                        (uint64_t) (stats->ipackets + stats->imissed));
824
825                 if (cur_fwd_eng == &csum_fwd_engine)
826                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
827                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
828                 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
829                         printf("  RX-badcrc:              %14"PRIu64"    RX-badlen: %14"PRIu64
830                                "    RX-error:%"PRIu64"\n",
831                                stats->ibadcrc, stats->ibadlen, stats->ierrors);
832                         printf("  RX-nombufs:             %14"PRIu64"\n",
833                                stats->rx_nombuf);
834                 }
835
836                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
837                        "%14"PRIu64"\n",
838                        stats->opackets, port->tx_dropped,
839                        (uint64_t) (stats->opackets + port->tx_dropped));
840         }
841
842         /* Display statistics of XON/XOFF pause frames, if any. */
843         if ((stats->tx_pause_xon  | stats->rx_pause_xon |
844              stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
845                 printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
846                        stats->rx_pause_xoff, stats->rx_pause_xon);
847                 printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
848                        stats->tx_pause_xoff, stats->tx_pause_xon);
849         }
850
851 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
852         if (port->rx_stream)
853                 pkt_burst_stats_display("RX",
854                         &port->rx_stream->rx_burst_stats);
855         if (port->tx_stream)
856                 pkt_burst_stats_display("TX",
857                         &port->tx_stream->tx_burst_stats);
858 #endif
859         /* Flow Director (fdir) statistics */
860         if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
861                 printf("  Fdirmiss:%14"PRIu64"    Fdirmatch:%14"PRIu64"\n",
862                        stats->fdirmiss,
863                        stats->fdirmatch);
864
865         if (port->rx_queue_stats_mapping_enabled) {
866                 printf("\n");
867                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
868                         printf("  Stats reg %2d RX-packets:%14"PRIu64
869                                "     RX-errors:%14"PRIu64
870                                "    RX-bytes:%14"PRIu64"\n",
871                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
872                 }
873                 printf("\n");
874         }
875         if (port->tx_queue_stats_mapping_enabled) {
876                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
877                         printf("  Stats reg %2d TX-packets:%14"PRIu64
878                                "                                 TX-bytes:%14"PRIu64"\n",
879                                i, stats->q_opackets[i], stats->q_obytes[i]);
880                 }
881         }
882
883         printf("  %s--------------------------------%s\n",
884                fwd_stats_border, fwd_stats_border);
885 }
886
887 static void
888 fwd_stream_stats_display(streamid_t stream_id)
889 {
890         struct fwd_stream *fs;
891         static const char *fwd_top_stats_border = "-------";
892
893         fs = fwd_streams[stream_id];
894         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
895             (fs->fwd_dropped == 0))
896                 return;
897         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
898                "TX Port=%2d/Queue=%2d %s\n",
899                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
900                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
901         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
902                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
903
904         /* if checksum mode */
905         if (cur_fwd_eng == &csum_fwd_engine) {
906                printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
907                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
908         }
909
910 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
911         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
912         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
913 #endif
914 }
915
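/*
 * Drain any stale packets left in the RX queues of the forwarding ports
 * before starting a new forwarding run. Two passes are done, with a short
 * delay in between, to catch packets still in flight.
 */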
916 static void
917 flush_fwd_rx_queues(void)
918 {
919         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
920         portid_t  rxp;
921         portid_t port_id;
922         queueid_t rxq;
923         uint16_t  nb_rx;
924         uint16_t  i;
925         uint8_t   j;
926
927         for (j = 0; j < 2; j++) {
928                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
929                         for (rxq = 0; rxq < nb_rxq; rxq++) {
930                                 port_id = fwd_ports_ids[rxp];
931                                 do {
932                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
933                                                 pkts_burst, MAX_PKT_BURST);
934                                         for (i = 0; i < nb_rx; i++)
935                                                 rte_pktmbuf_free(pkts_burst[i]);
936                                 } while (nb_rx > 0);
937                         }
938                 }
939                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
940         }
941 }
942
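/*
 * Main forwarding loop of a logical core: repeatedly apply the packet
 * forwarding function to every stream assigned to this lcore until the
 * core is asked to stop.
 */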
943 static void
944 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
945 {
946         struct fwd_stream **fsm;
947         streamid_t nb_fs;
948         streamid_t sm_id;
949
950         fsm = &fwd_streams[fc->stream_idx];
951         nb_fs = fc->stream_nb;
952         do {
953                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
954                         (*pkt_fwd)(fsm[sm_id]);
955         } while (! fc->stopped);
956 }
957
958 static int
959 start_pkt_forward_on_core(void *fwd_arg)
960 {
961         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
962                              cur_fwd_config.fwd_eng->packet_fwd);
963         return 0;
964 }
965
966 /*
967  * Run the TXONLY packet forwarding engine to send a single burst of packets.
968  * Used to start communication flows in network loopback test configurations.
969  */
970 static int
971 run_one_txonly_burst_on_core(void *fwd_arg)
972 {
973         struct fwd_lcore *fwd_lc;
974         struct fwd_lcore tmp_lcore;
975
976         fwd_lc = (struct fwd_lcore *) fwd_arg;
977         tmp_lcore = *fwd_lc;
978         tmp_lcore.stopped = 1;
979         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
980         return 0;
981 }
982
983 /*
984  * Launch packet forwarding:
985  *     - Setup per-port forwarding context.
986  *     - launch logical cores with their forwarding configuration.
987  */
988 static void
989 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
990 {
991         port_fwd_begin_t port_fwd_begin;
992         unsigned int i;
993         unsigned int lc_id;
994         int diag;
995
996         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
997         if (port_fwd_begin != NULL) {
998                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
999                         (*port_fwd_begin)(fwd_ports_ids[i]);
1000         }
1001         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1002                 lc_id = fwd_lcores_cpuids[i];
1003                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1004                         fwd_lcores[i]->stopped = 0;
1005                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1006                                                      fwd_lcores[i], lc_id);
1007                         if (diag != 0)
1008                                 printf("launch lcore %u failed - diag=%d\n",
1009                                        lc_id, diag);
1010                 }
1011         }
1012 }
1013
1014 /*
1015  * Launch packet forwarding configuration.
1016  */
1017 void
1018 start_packet_forwarding(int with_tx_first)
1019 {
1020         port_fwd_begin_t port_fwd_begin;
1021         port_fwd_end_t  port_fwd_end;
1022         struct rte_port *port;
1023         unsigned int i;
1024         portid_t   pt_id;
1025         streamid_t sm_id;
1026
1027         if (all_ports_started() == 0) {
1028                 printf("Not all ports were started\n");
1029                 return;
1030         }
1031         if (test_done == 0) {
1032                 printf("Packet forwarding already started\n");
1033                 return;
1034         }
1035         if(dcb_test) {
1036                 for (i = 0; i < nb_fwd_ports; i++) {
1037                         pt_id = fwd_ports_ids[i];
1038                         port = &ports[pt_id];
1039                         if (!port->dcb_flag) {
1040                                 printf("In DCB mode, all forwarding ports must "
1041                                        "be configured in this mode.\n");
1042                                 return;
1043                         }
1044                 }
1045                 if (nb_fwd_lcores == 1) {
1046                         printf("In DCB mode, the number of forwarding cores "
1047                                "should be larger than 1.\n");
1048                         return;
1049                 }
1050         }
1051         test_done = 0;
1052
1053         if(!no_flush_rx)
1054                 flush_fwd_rx_queues();
1055
1056         fwd_config_setup();
1057         rxtx_config_display();
1058
1059         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1060                 pt_id = fwd_ports_ids[i];
1061                 port = &ports[pt_id];
1062                 rte_eth_stats_get(pt_id, &port->stats);
1063                 port->tx_dropped = 0;
1064
1065                 map_port_queue_stats_mapping_registers(pt_id, port);
1066         }
1067         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1068                 fwd_streams[sm_id]->rx_packets = 0;
1069                 fwd_streams[sm_id]->tx_packets = 0;
1070                 fwd_streams[sm_id]->fwd_dropped = 0;
1071                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1072                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1073
1074 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1075                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1076                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1077                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1078                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1079 #endif
1080 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1081                 fwd_streams[sm_id]->core_cycles = 0;
1082 #endif
1083         }
1084         if (with_tx_first) {
1085                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1086                 if (port_fwd_begin != NULL) {
1087                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1088                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1089                 }
1090                 launch_packet_forwarding(run_one_txonly_burst_on_core);
1091                 rte_eal_mp_wait_lcore();
1092                 port_fwd_end = tx_only_engine.port_fwd_end;
1093                 if (port_fwd_end != NULL) {
1094                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1095                                 (*port_fwd_end)(fwd_ports_ids[i]);
1096                 }
1097         }
1098         launch_packet_forwarding(start_pkt_forward_on_core);
1099 }
1100
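/*
 * Stop packet forwarding: signal all forwarding lcores to stop, wait for
 * them to finish, then collect and display the per-stream and per-port
 * statistics accumulated during the run.
 */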
1101 void
1102 stop_packet_forwarding(void)
1103 {
1104         struct rte_eth_stats stats;
1105         struct rte_port *port;
1106         port_fwd_end_t  port_fwd_end;
1107         int i;
1108         portid_t   pt_id;
1109         streamid_t sm_id;
1110         lcoreid_t  lc_id;
1111         uint64_t total_recv;
1112         uint64_t total_xmit;
1113         uint64_t total_rx_dropped;
1114         uint64_t total_tx_dropped;
1115         uint64_t total_rx_nombuf;
1116         uint64_t tx_dropped;
1117         uint64_t rx_bad_ip_csum;
1118         uint64_t rx_bad_l4_csum;
1119 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1120         uint64_t fwd_cycles;
1121 #endif
1122         static const char *acc_stats_border = "+++++++++++++++";
1123
1124         if (all_ports_started() == 0) {
1125                 printf("Not all ports were started\n");
1126                 return;
1127         }
1128         if (test_done) {
1129                 printf("Packet forwarding not started\n");
1130                 return;
1131         }
1132         printf("Telling cores to stop...");
1133         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1134                 fwd_lcores[lc_id]->stopped = 1;
1135         printf("\nWaiting for lcores to finish...\n");
1136         rte_eal_mp_wait_lcore();
1137         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1138         if (port_fwd_end != NULL) {
1139                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1140                         pt_id = fwd_ports_ids[i];
1141                         (*port_fwd_end)(pt_id);
1142                 }
1143         }
1144 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1145         fwd_cycles = 0;
1146 #endif
1147         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1148                 if (cur_fwd_config.nb_fwd_streams >
1149                     cur_fwd_config.nb_fwd_ports) {
1150                         fwd_stream_stats_display(sm_id);
1151                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1152                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1153                 } else {
1154                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1155                                 fwd_streams[sm_id];
1156                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1157                                 fwd_streams[sm_id];
1158                 }
1159                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1160                 tx_dropped = (uint64_t) (tx_dropped +
1161                                          fwd_streams[sm_id]->fwd_dropped);
1162                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1163
1164                 rx_bad_ip_csum =
1165                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1166                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1167                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1168                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1169                                                         rx_bad_ip_csum;
1170
1171                 rx_bad_l4_csum =
1172                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1173                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1174                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1175                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1176                                                         rx_bad_l4_csum;
1177
1178 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1179                 fwd_cycles = (uint64_t) (fwd_cycles +
1180                                          fwd_streams[sm_id]->core_cycles);
1181 #endif
1182         }
1183         total_recv = 0;
1184         total_xmit = 0;
1185         total_rx_dropped = 0;
1186         total_tx_dropped = 0;
1187         total_rx_nombuf  = 0;
1188         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1189                 pt_id = fwd_ports_ids[i];
1190
1191                 port = &ports[pt_id];
1192                 rte_eth_stats_get(pt_id, &stats);
1193                 stats.ipackets -= port->stats.ipackets;
1194                 port->stats.ipackets = 0;
1195                 stats.opackets -= port->stats.opackets;
1196                 port->stats.opackets = 0;
1197                 stats.ibytes   -= port->stats.ibytes;
1198                 port->stats.ibytes = 0;
1199                 stats.obytes   -= port->stats.obytes;
1200                 port->stats.obytes = 0;
1201                 stats.imissed  -= port->stats.imissed;
1202                 port->stats.imissed = 0;
1203                 stats.oerrors  -= port->stats.oerrors;
1204                 port->stats.oerrors = 0;
1205                 stats.rx_nombuf -= port->stats.rx_nombuf;
1206                 port->stats.rx_nombuf = 0;
1207                 stats.fdirmatch -= port->stats.fdirmatch;
1208                 port->stats.rx_nombuf = 0;
1209                 stats.fdirmiss -= port->stats.fdirmiss;
1210                 port->stats.rx_nombuf = 0;
1211
1212                 total_recv += stats.ipackets;
1213                 total_xmit += stats.opackets;
1214                 total_rx_dropped += stats.imissed;
1215                 total_tx_dropped += port->tx_dropped;
1216                 total_rx_nombuf  += stats.rx_nombuf;
1217
1218                 fwd_port_stats_display(pt_id, &stats);
1219         }
1220         printf("\n  %s Accumulated forward statistics for all ports"
1221                "%s\n",
1222                acc_stats_border, acc_stats_border);
1223         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1224                "%-"PRIu64"\n"
1225                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1226                "%-"PRIu64"\n",
1227                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1228                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1229         if (total_rx_nombuf > 0)
1230                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1231         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1232                "%s\n",
1233                acc_stats_border, acc_stats_border);
1234 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1235         if (total_recv > 0)
1236                 printf("\n  CPU cycles/packet=%u (total cycles="
1237                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1238                        (unsigned int)(fwd_cycles / total_recv),
1239                        fwd_cycles, total_recv);
1240 #endif
1241         printf("\nDone.\n");
1242         test_done = 1;
1243 }
1244
1245 void
1246 dev_set_link_up(portid_t pid)
1247 {
1248         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1249                 printf("\nSet link up failed.\n");
1250 }
1251
1252 void
1253 dev_set_link_down(portid_t pid)
1254 {
1255         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1256                 printf("\nSet link down failed.\n");
1257 }
1258
1259 static int
1260 all_ports_started(void)
1261 {
1262         portid_t pi;
1263         struct rte_port *port;
1264
1265         FOREACH_PORT(pi, ports) {
1266                 port = &ports[pi];
1267                 /* Check if there is a port which is not started */
1268                 if (port->port_status != RTE_PORT_STARTED)
1269                         return 0;
1270         }
1271
1272         /* All ports have been started. */
1273         return 1;
1274 }
1275
1276 int
1277 all_ports_stopped(void)
1278 {
1279         portid_t pi;
1280         struct rte_port *port;
1281
1282         FOREACH_PORT(pi, ports) {
1283                 port = &ports[pi];
1284                 if (port->port_status != RTE_PORT_STOPPED)
1285                         return 0;
1286         }
1287
1288         return 1;
1289 }
1290
1291 int
1292 port_is_started(portid_t port_id)
1293 {
1294         if (port_id_is_invalid(port_id, ENABLED_WARN))
1295                 return 0;
1296
1297         if (ports[port_id].port_status != RTE_PORT_STARTED)
1298                 return 0;
1299
1300         return 1;
1301 }
1302
1303 static int
1304 port_is_closed(portid_t port_id)
1305 {
1306         if (port_id_is_invalid(port_id, ENABLED_WARN))
1307                 return 0;
1308
1309         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1310                 return 0;
1311
1312         return 1;
1313 }
1314
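/*
 * Start the given port, or all ports when pid is RTE_PORT_ALL:
 * (re)configure the device and its RX/TX queues if needed, start the
 * device and, unless disabled, wait for the links to come up.
 */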
1315 int
1316 start_port(portid_t pid)
1317 {
1318         int diag, need_check_link_status = -1;
1319         portid_t pi;
1320         queueid_t qi;
1321         struct rte_port *port;
1322         struct ether_addr mac_addr;
1323
1324         if (test_done == 0) {
1325                 printf("Please stop forwarding first\n");
1326                 return -1;
1327         }
1328
1329         if (port_id_is_invalid(pid, ENABLED_WARN))
1330                 return 0;
1331
1332         if (init_fwd_streams() < 0) {
1333                 printf("Fail from init_fwd_streams()\n");
1334                 return -1;
1335         }
1336
1337         if(dcb_config)
1338                 dcb_test = 1;
1339         FOREACH_PORT(pi, ports) {
1340                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1341                         continue;
1342
1343                 need_check_link_status = 0;
1344                 port = &ports[pi];
1345                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1346                                                  RTE_PORT_HANDLING) == 0) {
1347                         printf("Port %d is not stopped, skipping it\n", pi);
1348                         continue;
1349                 }
1350
1351                 if (port->need_reconfig > 0) {
1352                         port->need_reconfig = 0;
1353
1354                         printf("Configuring Port %d (socket %u)\n", pi,
1355                                         port->socket_id);
1356                         /* configure port */
1357                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1358                                                 &(port->dev_conf));
1359                         if (diag != 0) {
1360                                 if (rte_atomic16_cmpset(&(port->port_status),
1361                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1362                                         printf("Port %d can not be set back "
1363                                                         "to stopped\n", pi);
1364                                 printf("Failed to configure port %d\n", pi);
1365                                 /* try to reconfigure port next time */
1366                                 port->need_reconfig = 1;
1367                                 return -1;
1368                         }
1369                 }
1370                 if (port->need_reconfig_queues > 0) {
1371                         port->need_reconfig_queues = 0;
1372                         /* setup tx queues */
1373                         for (qi = 0; qi < nb_txq; qi++) {
1374                                 if ((numa_support) &&
1375                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1376                                         diag = rte_eth_tx_queue_setup(pi, qi,
1377                                                 nb_txd,txring_numa[pi],
1378                                                 &(port->tx_conf));
1379                                 else
1380                                         diag = rte_eth_tx_queue_setup(pi, qi,
1381                                                 nb_txd,port->socket_id,
1382                                                 &(port->tx_conf));
1383
1384                                 if (diag == 0)
1385                                         continue;
1386
1387                                 /* Fail to setup tx queue, return */
1388                                 if (rte_atomic16_cmpset(&(port->port_status),
1389                                                         RTE_PORT_HANDLING,
1390                                                         RTE_PORT_STOPPED) == 0)
1391                                         printf("Port %d can not be set back "
1392                                                         "to stopped\n", pi);
1393                                 printf("Failed to configure port %d tx queues\n", pi);
1394                                 /* try to reconfigure queues next time */
1395                                 port->need_reconfig_queues = 1;
1396                                 return -1;
1397                         }
1398                         /* setup rx queues */
1399                         for (qi = 0; qi < nb_rxq; qi++) {
1400                                 if ((numa_support) &&
1401                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1402                                         struct rte_mempool * mp =
1403                                                 mbuf_pool_find(rxring_numa[pi]);
1404                                         if (mp == NULL) {
1405                                                 printf("Failed to setup RX queue: "
1406                                                         "no mempool allocation "
1407                                                         "on socket %d\n",
1408                                                         rxring_numa[pi]);
1409                                                 return -1;
1410                                         }
1411
1412                                         diag = rte_eth_rx_queue_setup(pi, qi,
1413                                              nb_rxd, rxring_numa[pi],
1414                                              &(port->rx_conf), mp);
1415                                 }
1416                                 else
1417                                         diag = rte_eth_rx_queue_setup(pi, qi,
1418                                              nb_rxd, port->socket_id,
1419                                              &(port->rx_conf),
1420                                              mbuf_pool_find(port->socket_id));
1421
1422                                 if (diag == 0)
1423                                         continue;
1424
1425
1426                                 /* Failed to set up an RX queue, return */
1427                                 if (rte_atomic16_cmpset(&(port->port_status),
1428                                                         RTE_PORT_HANDLING,
1429                                                         RTE_PORT_STOPPED) == 0)
1430                                         printf("Port %d cannot be set back "
1431                                                         "to stopped\n", pi);
1432                                 printf("Failed to configure port %d RX queues\n", pi);
1433                                 /* try to reconfigure queues next time */
1434                                 port->need_reconfig_queues = 1;
1435                                 return -1;
1436                         }
1437                 }
1438                 /* start port */
1439                 if (rte_eth_dev_start(pi) < 0) {
1440                         printf("Failed to start port %d\n", pi);
1441
1442                         /* Failed to start the port, roll the status back */
1443                         if (rte_atomic16_cmpset(&(port->port_status),
1444                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1445                                 printf("Port %d cannot be set back to "
1446                                                         "stopped\n", pi);
1447                         continue;
1448                 }
1449
1450                 if (rte_atomic16_cmpset(&(port->port_status),
1451                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1452                         printf("Port %d cannot be set to started\n", pi);
1453
1454                 rte_eth_macaddr_get(pi, &mac_addr);
1455                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1456                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1457                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1458                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1459
1460                 /* at least one port started, need to check link status */
1461                 need_check_link_status = 1;
1462         }
1463
1464         if (need_check_link_status == 1 && !no_link_check)
1465                 check_all_ports_link_status(RTE_PORT_ALL);
1466         else if (need_check_link_status == 0)
1467                 printf("Please stop the ports first\n");
1468
1469         printf("Done\n");
1470         return 0;
1471 }
1472
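/* Stop the given port, or all ports when pid is RTE_PORT_ALL. */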
1473 void
1474 stop_port(portid_t pid)
1475 {
1476         portid_t pi;
1477         struct rte_port *port;
1478         int need_check_link_status = 0;
1479
1480         if (test_done == 0) {
1481                 printf("Please stop forwarding first\n");
1482                 return;
1483         }
1484         if (dcb_test) {
1485                 dcb_test = 0;
1486                 dcb_config = 0;
1487         }
1488
1489         if (port_id_is_invalid(pid, ENABLED_WARN))
1490                 return;
1491
1492         printf("Stopping ports...\n");
1493
1494         FOREACH_PORT(pi, ports) {
1495                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1496                         continue;
1497
1498                 port = &ports[pi];
1499                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1500                                                 RTE_PORT_HANDLING) == 0)
1501                         continue;
1502
1503                 rte_eth_dev_stop(pi);
1504
1505                 if (rte_atomic16_cmpset(&(port->port_status),
1506                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1507                         printf("Port %d cannot be set to stopped\n", pi);
1508                 need_check_link_status = 1;
1509         }
1510         if (need_check_link_status && !no_link_check)
1511                 check_all_ports_link_status(RTE_PORT_ALL);
1512
1513         printf("Done\n");
1514 }
1515
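/*
 * Close the given port, or all ports when pid is RTE_PORT_ALL.
 * Only ports that are currently in the stopped state are closed.
 */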
1516 void
1517 close_port(portid_t pid)
1518 {
1519         portid_t pi;
1520         struct rte_port *port;
1521
1522         if (test_done == 0) {
1523                 printf("Please stop forwarding first\n");
1524                 return;
1525         }
1526
1527         if (port_id_is_invalid(pid, ENABLED_WARN))
1528                 return;
1529
1530         printf("Closing ports...\n");
1531
1532         FOREACH_PORT(pi, ports) {
1533                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1534                         continue;
1535
1536                 port = &ports[pi];
1537                 if (rte_atomic16_cmpset(&(port->port_status),
1538                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1539                         printf("Port %d is not stopped, skipping\n", pi);
1540                         continue;
1541                 }
1542
1543                 rte_eth_dev_close(pi);
1544
1545                 if (rte_atomic16_cmpset(&(port->port_status),
1546                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1547                         printf("Port %d cannot be set to closed\n", pi);
1548         }
1549
1550         printf("Done\n");
1551 }
1552
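/*
 * Hot-plug a new port from its device identifier and add it to the
 * forwarding configuration.
 */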
1553 void
1554 attach_port(char *identifier)
1555 {
1556         portid_t i, j, pi = 0;
1557
1558         printf("Attaching a new port...\n");
1559
1560         if (identifier == NULL) {
1561                 printf("No device identifier specified\n");
1562                 return;
1563         }
1564
1565         if (test_done == 0) {
1566                 printf("Please stop forwarding first\n");
1567                 return;
1568         }
1569
1570         if (rte_eth_dev_attach(identifier, &pi))
1571                 return;
1572
1573         ports[pi].enabled = 1;
1574         reconfig(pi, rte_eth_dev_socket_id(pi));
1575         rte_eth_promiscuous_enable(pi);
1576
1577         nb_ports = rte_eth_dev_count();
1578
1579         /* set_default_fwd_ports_config(); */
1580         bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
1581         i = 0;
1582         FOREACH_PORT(j, ports) {
1583                 fwd_ports_ids[i] = j;
1584                 i++;
1585         }
1586         nb_cfg_ports = nb_ports;
1587         nb_fwd_ports++;
1588
1589         ports[pi].port_status = RTE_PORT_STOPPED;
1590
1591         printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1592         printf("Done\n");
1593 }
1594
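/*
 * Hot-unplug a closed port and remove it from the forwarding configuration.
 */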
1595 void
1596 detach_port(uint8_t port_id)
1597 {
1598         portid_t i, pi = 0;
1599         char name[RTE_ETH_NAME_MAX_LEN];
1600
1601         printf("Detaching a port...\n");
1602
1603         if (!port_is_closed(port_id)) {
1604                 printf("Please close port first\n");
1605                 return;
1606         }
1607
1608         rte_eth_promiscuous_disable(port_id);
1609
1610         if (rte_eth_dev_detach(port_id, name))
1611                 return;
1612
1613         ports[port_id].enabled = 0;
1614         nb_ports = rte_eth_dev_count();
1615
1616         /* set_default_fwd_ports_config(); */
1617         bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
1618         i = 0;
1619         FOREACH_PORT(pi, ports) {
1620                 fwd_ports_ids[i] = pi;
1621                 i++;
1622         }
1623         nb_cfg_ports = nb_ports;
1624         nb_fwd_ports--;
1625
1626         printf("Port '%s' is detached. Total number of ports is now %d\n",
1627                         name, nb_ports);
1628         printf("Done\n");
1629         return;
1630 }
1631
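/* Stop forwarding if it is still running and close every port before exit. */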
1632 void
1633 pmd_test_exit(void)
1634 {
1635         portid_t pt_id;
1636
1637         if (test_done == 0)
1638                 stop_packet_forwarding();
1639
1640         FOREACH_PORT(pt_id, ports) {
1641                 printf("Stopping port %d...", pt_id);
1642                 fflush(stdout);
1643                 rte_eth_dev_close(pt_id);
1644                 printf("done\n");
1645         }
1646         printf("bye...\n");
1647 }
1648
1649 typedef void (*cmd_func_t)(void);
1650 struct pmd_test_command {
1651         const char *cmd_name;
1652         cmd_func_t cmd_func;
1653 };
1654
1655 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1656
1657 /* Check the link status of all ports for up to 9 seconds, and print the final status */
1658 static void
1659 check_all_ports_link_status(uint32_t port_mask)
1660 {
1661 #define CHECK_INTERVAL 100 /* 100ms */
1662 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1663         uint8_t portid, count, all_ports_up, print_flag = 0;
1664         struct rte_eth_link link;
1665
1666         printf("Checking link statuses...\n");
1667         fflush(stdout);
1668         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1669                 all_ports_up = 1;
1670                 FOREACH_PORT(portid, ports) {
1671                         if ((port_mask & (1 << portid)) == 0)
1672                                 continue;
1673                         memset(&link, 0, sizeof(link));
1674                         rte_eth_link_get_nowait(portid, &link);
1675                         /* print link status if flag set */
1676                         if (print_flag == 1) {
1677                                 if (link.link_status)
1678                                         printf("Port %d Link Up - speed %u "
1679                                                 "Mbps - %s\n", (uint8_t)portid,
1680                                                 (unsigned)link.link_speed,
1681                                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1682                                                 "full-duplex" : "half-duplex");
1683                                 else
1684                                         printf("Port %d Link Down\n",
1685                                                 (uint8_t)portid);
1686                                 continue;
1687                         }
1688                         /* clear all_ports_up flag if any link down */
1689                         if (link.link_status == 0) {
1690                                 all_ports_up = 0;
1691                                 break;
1692                         }
1693                 }
1694                 /* after finally printing all link status, get out */
1695                 if (print_flag == 1)
1696                         break;
1697
1698                 if (all_ports_up == 0) {
1699                         fflush(stdout);
1700                         rte_delay_ms(CHECK_INTERVAL);
1701                 }
1702
1703                 /* set the print_flag if all ports up or timeout */
1704                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1705                         print_flag = 1;
1706                 }
1707         }
1708 }
1709
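/*
 * Apply the TX queue to statistics counter mappings given on the command
 * line for this port, if any.
 */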
1710 static int
1711 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1712 {
1713         uint16_t i;
1714         int diag;
1715         uint8_t mapping_found = 0;
1716
1717         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1718                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1719                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1720                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1721                                         tx_queue_stats_mappings[i].queue_id,
1722                                         tx_queue_stats_mappings[i].stats_counter_id);
1723                         if (diag != 0)
1724                                 return diag;
1725                         mapping_found = 1;
1726                 }
1727         }
1728         if (mapping_found)
1729                 port->tx_queue_stats_mapping_enabled = 1;
1730         return 0;
1731 }
1732
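/*
 * Apply the RX queue to statistics counter mappings given on the command
 * line for this port, if any.
 */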
1733 static int
1734 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1735 {
1736         uint16_t i;
1737         int diag;
1738         uint8_t mapping_found = 0;
1739
1740         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1741                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1742                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1743                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1744                                         rx_queue_stats_mappings[i].queue_id,
1745                                         rx_queue_stats_mappings[i].stats_counter_id);
1746                         if (diag != 0)
1747                                 return diag;
1748                         mapping_found = 1;
1749                 }
1750         }
1751         if (mapping_found)
1752                 port->rx_queue_stats_mapping_enabled = 1;
1753         return 0;
1754 }
1755
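/*
 * Program the RX/TX queue statistics mappings of a port; a PMD that does
 * not support the feature is not treated as a fatal error.
 */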
1756 static void
1757 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1758 {
1759         int diag = 0;
1760
1761         diag = set_tx_queue_stats_mapping_registers(pi, port);
1762         if (diag != 0) {
1763                 if (diag == -ENOTSUP) {
1764                         port->tx_queue_stats_mapping_enabled = 0;
1765                         printf("TX queue stats mapping not supported for port id=%d\n", pi);
1766                 }
1767                 else
1768                         rte_exit(EXIT_FAILURE,
1769                                         "set_tx_queue_stats_mapping_registers "
1770                                         "failed for port id=%d diag=%d\n",
1771                                         pi, diag);
1772         }
1773
1774         diag = set_rx_queue_stats_mapping_registers(pi, port);
1775         if (diag != 0) {
1776                 if (diag == -ENOTSUP) {
1777                         port->rx_queue_stats_mapping_enabled = 0;
1778                         printf("RX queue stats mapping not supported for port id=%d\n", pi);
1779                 }
1780                 else
1781                         rte_exit(EXIT_FAILURE,
1782                                         "set_rx_queue_stats_mapping_registers "
1783                                         "failed for port id=%d diag=%d\n",
1784                                         pi, diag);
1785         }
1786 }
1787
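/*
 * Start from the PMD default RX/TX queue configuration and override only
 * the thresholds given on the command line.
 */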
1788 static void
1789 rxtx_port_config(struct rte_port *port)
1790 {
1791         port->rx_conf = port->dev_info.default_rxconf;
1792         port->tx_conf = port->dev_info.default_txconf;
1793
1794         /* Check if any RX/TX parameters have been passed */
1795         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1796                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1797
1798         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1799                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1800
1801         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1802                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1803
1804         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1805                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1806
1807         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1808                 port->rx_conf.rx_drop_en = rx_drop_en;
1809
1810         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1811                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1812
1813         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1814                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1815
1816         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1817                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1818
1819         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1820                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1821
1822         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1823                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1824
1825         if (txq_flags != RTE_PMD_PARAM_UNSET)
1826                 port->tx_conf.txq_flags = txq_flags;
1827 }
1828
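/*
 * Set the default device configuration of every port: RSS when several RX
 * queues are requested, VMDq+RSS when the device reports VFs.
 */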
1829 void
1830 init_port_config(void)
1831 {
1832         portid_t pid;
1833         struct rte_port *port;
1834
1835         FOREACH_PORT(pid, ports) {
1836                 port = &ports[pid];
1837                 port->dev_conf.rxmode = rx_mode;
1838                 port->dev_conf.fdir_conf = fdir_conf;
1839                 if (nb_rxq > 1) {
1840                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1841                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1842                 } else {
1843                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1844                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1845                 }
1846
1847                 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1848                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1849                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1850                         else
1851                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1852                 }
1853
1854                 if (port->dev_info.max_vfs != 0) {
1855                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1856                                 port->dev_conf.rxmode.mq_mode =
1857                                         ETH_MQ_RX_VMDQ_RSS;
1858                         else
1859                                 port->dev_conf.rxmode.mq_mode =
1860                                         ETH_MQ_RX_NONE;
1861
1862                         port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1863                 }
1864
1865                 rxtx_port_config(port);
1866
1867                 rte_eth_macaddr_get(pid, &port->eth_addr);
1868
1869                 map_port_queue_stats_mapping_registers(pid, port);
1870 #ifdef RTE_NIC_BYPASS
1871                 rte_eth_dev_bypass_init(pid);
1872 #endif
1873         }
1874 }
1875
1876 const uint16_t vlan_tags[] = {
1877                 0,  1,  2,  3,  4,  5,  6,  7,
1878                 8,  9, 10, 11,  12, 13, 14, 15,
1879                 16, 17, 18, 19, 20, 21, 22, 23,
1880                 24, 25, 26, 27, 28, 29, 30, 31
1881 };
1882
1883 static int
1884 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1885 {
1886         uint8_t i;
1887
1888         /*
1889          * Build up the configuration for DCB+VT based on the vlan_tags array
1890          * given above, and the number of traffic classes available for use.
1891          */
1892         if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1893                 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1894                 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1895
1896                 /* VMDQ+DCB RX and TX configurations */
1897                 vmdq_rx_conf.enable_default_pool = 0;
1898                 vmdq_rx_conf.default_pool = 0;
1899                 vmdq_rx_conf.nb_queue_pools =
1900                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1901                 vmdq_tx_conf.nb_queue_pools =
1902                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1903
1904                 vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1905                 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1906                         vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
1907                         vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1908                 }
1909                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1910                         vmdq_rx_conf.dcb_queue[i] = i;
1911                         vmdq_tx_conf.dcb_queue[i] = i;
1912                 }
1913
1914                 /* set DCB mode of RX and TX of multiple queues */
1915                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1916                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1917                 if (dcb_conf->pfc_en)
1918                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1919                 else
1920                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1921
1922                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1923                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1924                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1925                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1926         }
1927         else {
1928                 struct rte_eth_dcb_rx_conf rx_conf;
1929                 struct rte_eth_dcb_tx_conf tx_conf;
1930
1931                 /* queue mapping configuration of DCB RX and TX */
1932                 if (dcb_conf->num_tcs == ETH_4_TCS)
1933                         dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1934                 else
1935                         dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1936
1937                 rx_conf.nb_tcs = dcb_conf->num_tcs;
1938                 tx_conf.nb_tcs = dcb_conf->num_tcs;
1939
1940                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1941                         rx_conf.dcb_queue[i] = i;
1942                         tx_conf.dcb_queue[i] = i;
1943                 }
1944                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1945                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1946                 if (dcb_conf->pfc_en)
1947                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1948                 else
1949                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1950
1951                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1952                                 sizeof(struct rte_eth_dcb_rx_conf)));
1953                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1954                                 sizeof(struct rte_eth_dcb_tx_conf)));
1955         }
1956
1957         return 0;
1958 }
1959
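/*
 * Configure a port for the DCB test: 128 RX/TX queues, DCB (optionally
 * combined with VMDq) multi-queue mode and a VLAN filter on the test
 * VLAN tags.
 */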
1960 int
1961 init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
1962 {
1963         struct rte_eth_conf port_conf;
1964         struct rte_port *rte_port;
1965         int retval;
1966         uint16_t nb_vlan;
1967         uint16_t i;
1968
1969         /* rxq and txq configuration in dcb mode */
1970         nb_rxq = 128;
1971         nb_txq = 128;
1972         rx_free_thresh = 64;
1973
1974         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1975         /* Enter DCB configuration status */
1976         dcb_config = 1;
1977
1978         nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1979         /* set configuration of DCB in VT mode and DCB in non-VT mode */
1980         retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1981         if (retval < 0)
1982                 return retval;
1983
1984         rte_port = &ports[pid];
1985         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1986
1987         rxtx_port_config(rte_port);
1988         /* VLAN filter */
1989         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1990         for (i = 0; i < nb_vlan; i++) {
1991                 rx_vft_set(pid, vlan_tags[i], 1);
1992         }
1993
1994         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1995         map_port_queue_stats_mapping_registers(pid, rte_port);
1996
1997         rte_port->dcb_flag = 1;
1998
1999         return 0;
2000 }
2001
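/* Allocate the array of port data structures and mark probed ports as enabled. */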
2002 static void
2003 init_port(void)
2004 {
2005         portid_t pid;
2006
2007         /* Configuration of Ethernet ports. */
2008         ports = rte_zmalloc("testpmd: ports",
2009                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2010                             RTE_CACHE_LINE_SIZE);
2011         if (ports == NULL) {
2012                 rte_exit(EXIT_FAILURE,
2013                                 "rte_zmalloc(%d struct rte_port) failed\n",
2014                                 RTE_MAX_ETHPORTS);
2015         }
2016
2017         /* enable allocated ports */
2018         for (pid = 0; pid < nb_ports; pid++)
2019                 ports[pid].enabled = 1;
2020 }
2021
2022 int
2023 main(int argc, char** argv)
2024 {
2025         int  diag;
2026         uint8_t port_id;
2027
2028         diag = rte_eal_init(argc, argv);
2029         if (diag < 0)
2030                 rte_panic("Cannot init EAL\n");
2031
2032         nb_ports = (portid_t) rte_eth_dev_count();
2033         if (nb_ports == 0)
2034                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2035
2036         /* allocate port structures, and init them */
2037         init_port();
2038
2039         set_def_fwd_config();
2040         if (nb_lcores == 0)
2041                 rte_panic("Empty set of forwarding logical cores - check the "
2042                           "core mask supplied in the command parameters\n");
2043
2044         argc -= diag;
2045         argv += diag;
2046         if (argc > 1)
2047                 launch_args_parse(argc, argv);
2048
2049         if (nb_rxq > nb_txq)
2050                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2051                        "but nb_txq=%d will prevent RSS from being fully tested.\n",
2052                        nb_rxq, nb_txq);
2053
2054         init_config();
2055         if (start_port(RTE_PORT_ALL) != 0)
2056                 rte_exit(EXIT_FAILURE, "Failed to start ports\n");
2057
2058         /* set all ports to promiscuous mode by default */
2059         FOREACH_PORT(port_id, ports)
2060                 rte_eth_promiscuous_enable(port_id);
2061
2062 #ifdef RTE_LIBRTE_CMDLINE
2063         if (interactive == 1) {
2064                 if (auto_start) {
2065                         printf("Start automatic packet forwarding\n");
2066                         start_packet_forwarding(0);
2067                 }
2068                 prompt();
2069         } else
2070 #endif
2071         {
2072                 char c;
2073                 int rc;
2074
2075                 printf("No interactive command line, starting packet forwarding\n");
2076                 start_packet_forwarding(0);
2077                 printf("Press enter to exit\n");
2078                 rc = read(0, &c, 1);
2079                 if (rc < 0)
2080                         return 1;
2081         }
2082
2083         return 0;
2084 }