app/testpmd: support port hotplug
[dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
78 #endif
79
80 #include "testpmd.h"
81 #include "mempool_osdep.h"
82
83 uint16_t verbose_level = 0; /**< Silent by default. */
84
85 /* use master core for command line ? */
86 uint8_t interactive = 0;
87 uint8_t auto_start = 0;
88
89 /*
90  * NUMA support configuration.
91  * When set, the NUMA support attempts to dispatch the allocation of the
92  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
93  * probed ports among the CPU sockets 0 and 1.
94  * Otherwise, all memory is allocated from CPU socket 0.
95  */
96 uint8_t numa_support = 0; /**< No numa support by default */
97
98 /*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
100  * not configured.
101  */
102 uint8_t socket_num = UMA_NO_CONFIG;
103
104 /*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
106  */
107 uint8_t mp_anon = 0;
108
109 /*
110  * Record the Ethernet address of peer target ports to which packets are
111  * forwarded.
 * Must be instantiated with the ethernet addresses of peer traffic generator
113  * ports.
114  */
115 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
116 portid_t nb_peer_eth_addrs = 0;
117
118 /*
119  * Probed Target Environment.
120  */
121 struct rte_port *ports;        /**< For all probed ethernet ports. */
122 portid_t nb_ports;             /**< Number of probed ethernet ports. */
123 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
124 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
125
126 /*
127  * Test Forwarding Configuration.
128  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
129  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
130  */
131 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
132 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
133 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
134 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
135
136 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
137 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
138
139 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
140 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
141
142 /*
143  * Forwarding engines.
144  */
145 struct fwd_engine * fwd_engines[] = {
146         &io_fwd_engine,
147         &mac_fwd_engine,
148         &mac_retry_fwd_engine,
149         &mac_swap_engine,
150         &flow_gen_engine,
151         &rx_only_engine,
152         &tx_only_engine,
153         &csum_fwd_engine,
154         &icmp_echo_engine,
155 #ifdef RTE_LIBRTE_IEEE1588
156         &ieee1588_fwd_engine,
157 #endif
158         NULL,
159 };
160
161 struct fwd_config cur_fwd_config;
162 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
163
164 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
165 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
166                                       * specified on command-line. */
167
168 /*
169  * Configuration of packet segments used by the "txonly" processing engine.
170  */
171 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
172 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
173         TXONLY_DEF_PACKET_LEN,
174 };
175 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
176
177 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
178 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
179
/* current configuration is in DCB or not, 0 means it is not in DCB mode */
181 uint8_t dcb_config = 0;
182
183 /* Whether the dcb is in testing status */
184 uint8_t dcb_test = 0;
185
186 /* DCB on and VT on mapping is default */
187 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
188
189 /*
190  * Configurable number of RX/TX queues.
191  */
192 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
193 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
194
195 /*
196  * Configurable number of RX/TX ring descriptors.
197  */
198 #define RTE_TEST_RX_DESC_DEFAULT 128
199 #define RTE_TEST_TX_DESC_DEFAULT 512
200 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
201 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
202
203 #define RTE_PMD_PARAM_UNSET -1
204 /*
205  * Configurable values of RX and TX ring threshold registers.
206  */
207
208 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
210 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
211
212 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
214 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
215
216 /*
217  * Configurable value of RX free threshold.
218  */
219 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
220
221 /*
222  * Configurable value of RX drop enable.
223  */
224 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
225
226 /*
227  * Configurable value of TX free threshold.
228  */
229 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
230
231 /*
232  * Configurable value of TX RS bit threshold.
233  */
234 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
235
236 /*
237  * Configurable value of TX queue flags.
238  */
239 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
240
241 /*
242  * Receive Side Scaling (RSS) configuration.
243  */
244 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
245
246 /*
247  * Port topology configuration
248  */
249 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
250
251 /*
 * Avoid flushing all the RX streams before forwarding starts.
253  */
254 uint8_t no_flush_rx = 0; /* flush by default */
255
256 /*
 * Avoid checking the link status when starting/stopping a port.
258  */
259 uint8_t no_link_check = 0; /* check by default */
260
261 /*
262  * NIC bypass mode configuration options.
263  */
264 #ifdef RTE_NIC_BYPASS
265
266 /* The NIC bypass watchdog timeout. */
267 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
268
269 #endif
270
271 /*
272  * Ethernet device configuration.
273  */
274 struct rte_eth_rxmode rx_mode = {
275         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
276         .split_hdr_size = 0,
277         .header_split   = 0, /**< Header Split disabled. */
278         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
279         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
280         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
281         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
282         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
283         .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
284 };
285
286 struct rte_fdir_conf fdir_conf = {
287         .mode = RTE_FDIR_MODE_NONE,
288         .pballoc = RTE_FDIR_PBALLOC_64K,
289         .status = RTE_FDIR_REPORT_STATUS,
290         .mask = {
291                 .vlan_tci_mask = 0x0,
292                 .ipv4_mask     = {
293                         .src_ip = 0xFFFFFFFF,
294                         .dst_ip = 0xFFFFFFFF,
295                 },
296                 .ipv6_mask     = {
297                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
298                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
299                 },
300                 .src_port_mask = 0xFFFF,
301                 .dst_port_mask = 0xFFFF,
302         },
303         .drop_queue = 127,
304 };
305
306 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
307
308 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
309 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
310
311 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
312 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
313
314 uint16_t nb_tx_queue_stats_mappings = 0;
315 uint16_t nb_rx_queue_stats_mappings = 0;
316
317 /* Forward function declarations */
318 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
319 static void check_all_ports_link_status(uint32_t port_mask);
320
321 /*
322  * Check if all the ports are started.
323  * If yes, return positive value. If not, return zero.
324  */
325 static int all_ports_started(void);
326
327 /*
328  * Find next enabled port
329  */
330 portid_t
331 find_next_port(portid_t p, struct rte_port *ports, int size)
332 {
333         if (ports == NULL)
334                 rte_exit(-EINVAL, "failed to find a next port id\n");
335
336         while ((ports[p].enabled == 0) && (p < size))
337                 p++;
338         return p;
339 }
340
341 /*
342  * Setup default configuration.
343  */
344 static void
345 set_default_fwd_lcores_config(void)
346 {
347         unsigned int i;
348         unsigned int nb_lc;
349
350         nb_lc = 0;
351         for (i = 0; i < RTE_MAX_LCORE; i++) {
352                 if (! rte_lcore_is_enabled(i))
353                         continue;
354                 if (i == rte_get_master_lcore())
355                         continue;
356                 fwd_lcores_cpuids[nb_lc++] = i;
357         }
358         nb_lcores = (lcoreid_t) nb_lc;
359         nb_cfg_lcores = nb_lcores;
360         nb_fwd_lcores = 1;
361 }
362
363 static void
364 set_def_peer_eth_addrs(void)
365 {
366         portid_t i;
367
368         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
369                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
370                 peer_eth_addrs[i].addr_bytes[5] = i;
371         }
372 }
373
374 static void
375 set_default_fwd_ports_config(void)
376 {
377         portid_t pt_id;
378
379         for (pt_id = 0; pt_id < nb_ports; pt_id++)
380                 fwd_ports_ids[pt_id] = pt_id;
381
382         nb_cfg_ports = nb_ports;
383         nb_fwd_ports = nb_ports;
384 }
385
/* Apply the full default forwarding configuration: lcore list, peer
 * Ethernet addresses and port list. */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
393
394 /*
395  * Configuration initialisation done once at init time.
396  */
/* Per-mbuf constructor argument: locates and sizes the data segment
 * inside each raw mempool object (see testpmd_mbuf_ctor). */
struct mbuf_ctor_arg {
	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
};

/* Pool-level constructor argument: data segment size recorded in the
 * pool private area (see testpmd_mbuf_pool_ctor). */
struct mbuf_pool_ctor_arg {
	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
};
405
/*
 * Per-object mempool constructor, run once for every raw mbuf at pool
 * creation time: initializes the rte_mbuf header fields and points
 * buf_addr at the data area that follows the header in the same object.
 */
static void
testpmd_mbuf_ctor(struct rte_mempool *mp,
		  void *opaque_arg,
		  void *raw_mbuf,
		  __attribute__((unused)) unsigned i)
{
	struct mbuf_ctor_arg *mb_ctor_arg;
	struct rte_mbuf    *mb;

	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
	mb = (struct rte_mbuf *) raw_mbuf;

	mb->pool         = mp;
	/* Data buffer lives seg_buf_offset bytes into the same object. */
	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
	/* Physical address: object's physical base plus the same offset. */
	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
			mb_ctor_arg->seg_buf_offset);
	mb->buf_len      = mb_ctor_arg->seg_buf_size;
	mb->ol_flags     = 0;
	/* Reserve the standard headroom before packet data. */
	mb->data_off     = RTE_PKTMBUF_HEADROOM;
	mb->nb_segs      = 1;
	mb->tx_offload   = 0;
	mb->vlan_tci     = 0;
	mb->hash.rss     = 0;
}
430
431 static void
432 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
433                        void *opaque_arg)
434 {
435         struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
436         struct rte_pktmbuf_pool_private *mbp_priv;
437
438         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
439                 printf("%s(%s) private_data_size %d < %d\n",
440                        __func__, mp->name, (int) mp->private_data_size,
441                        (int) sizeof(struct rte_pktmbuf_pool_private));
442                 return;
443         }
444         mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
445         mbp_priv = rte_mempool_get_priv(mp);
446         mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
447 }
448
/*
 * Create the mbuf pool for one CPU socket, named via
 * mbuf_poolname_build() so it can be found again with mbuf_pool_find().
 * mbuf_seg_size is the usable data size per mbuf (headroom is added on
 * top); nb_mbuf is the pool population.  Exits the application on
 * allocation failure.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp;
	struct mbuf_pool_ctor_arg mbp_ctor_arg;
	struct mbuf_ctor_arg mb_ctor_arg;
	uint32_t mb_size;

	/* Each data segment = headroom + user-visible data. */
	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
						mbuf_seg_size);
	/* Data starts right after the cache-line-aligned mbuf header. */
	mb_ctor_arg.seg_buf_offset =
		(uint16_t) RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

#ifdef RTE_LIBRTE_PMD_XENVIRT
	/* Xen build: allocate the pool from grant-allocated memory. */
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
				   (unsigned) mb_mempool_cache,
				   sizeof(struct rte_pktmbuf_pool_private),
				   testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				   testpmd_mbuf_ctor, &mb_ctor_arg,
				   socket_id, 0);



#else
	/* --mp-anon: back the pool with anonymous mapped memory (might
	 * not be physically contiguous) instead of hugepage memory. */
	if (mp_anon != 0)
		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);
	else
		rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);

#endif

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
						"failed\n", socket_id);
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
502
503 /*
504  * Check given socket id is valid or not with NUMA mode,
505  * if valid, return 0, else return -1
506  */
507 static int
508 check_socket_id(const unsigned int socket_id)
509 {
510         static int warning_once = 0;
511
512         if (socket_id >= MAX_SOCKET) {
513                 if (!warning_once && numa_support)
514                         printf("Warning: NUMA should be configured manually by"
515                                " using --port-numa-config and"
516                                " --ring-numa-config parameters along with"
517                                " --numa.\n");
518                 warning_once = 1;
519                 return -1;
520         }
521         return 0;
522 }
523
/*
 * One-time testpmd initialisation: allocate the forwarding-lcore and
 * port arrays, create the per-socket mbuf pools, bind each lcore to a
 * pool and build the forwarding streams.  Exits the application on any
 * allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[MAX_SOCKET];

	memset(port_per_socket,0,MAX_SOCKET);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* Worst-case per-port demand: full RX+TX rings, per-lcore
		 * mempool caches, plus one burst in flight. */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		/* UMA: one shared pool must cover every possible port
		 * (sized for RTE_MAX_ETHPORTS so hot-plugged ports fit). */
		if (!numa_support)
			nb_mbuf_per_pool =
				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	/* Configuration of Ethernet ports.  The table is sized for
	 * RTE_MAX_ETHPORTS so ports can be attached/detached later. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}

	/* enabled allocated ports */
	for (pid = 0; pid < nb_ports; pid++)
		ports[pid].enabled = 1;

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		/* Count ports per socket (explicit --port-numa-config
		 * takes precedence over the device's own socket). */
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		/* Split a user-fixed mbuf budget evenly across ports. */
		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;

		/* One pool per socket, sized for RTE_MAX_ETHPORTS ports
		 * so later hot-plugged ports are covered too. */
		for (i = 0; i < MAX_SOCKET; i++) {
			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						nb_mbuf,i);
		}
	}
	init_port_config();

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* Fall back to the socket-0 pool when the lcore's own
		 * socket has none. */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
650
651
652 void
653 reconfig(portid_t new_port_id, unsigned socket_id)
654 {
655         struct rte_port *port;
656
657         /* Reconfiguration of Ethernet ports. */
658         port = &ports[new_port_id];
659         rte_eth_dev_info_get(new_port_id, &port->dev_info);
660
661         /* set flag to initialize port/queue */
662         port->need_reconfig = 1;
663         port->need_reconfig_queues = 1;
664         port->socket_id = socket_id;
665
666         init_port_config();
667 }
668
669
/*
 * (Re)build the forwarding stream array: one stream per RX queue of
 * each enabled port.  Also assigns every port its NUMA socket id.
 *
 * Returns 0 on success, -1 when the configured nb_rxq/nb_txq exceed a
 * device's limits.  Exits the application on allocation failure.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;

	/* set socket id according to numa or not */
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			/* Explicit --port-numa-config wins over the
			 * device's own socket id. */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
	/* Keep the existing array when the stream count is unchanged. */
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
744
745 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
746 static void
747 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
748 {
749         unsigned int total_burst;
750         unsigned int nb_burst;
751         unsigned int burst_stats[3];
752         uint16_t pktnb_stats[3];
753         uint16_t nb_pkt;
754         int burst_percent[3];
755
756         /*
757          * First compute the total number of packet bursts and the
758          * two highest numbers of bursts of the same number of packets.
759          */
760         total_burst = 0;
761         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
762         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
763         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
764                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
765                 if (nb_burst == 0)
766                         continue;
767                 total_burst += nb_burst;
768                 if (nb_burst > burst_stats[0]) {
769                         burst_stats[1] = burst_stats[0];
770                         pktnb_stats[1] = pktnb_stats[0];
771                         burst_stats[0] = nb_burst;
772                         pktnb_stats[0] = nb_pkt;
773                 }
774         }
775         if (total_burst == 0)
776                 return;
777         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
778         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
779                burst_percent[0], (int) pktnb_stats[0]);
780         if (burst_stats[0] == total_burst) {
781                 printf("]\n");
782                 return;
783         }
784         if (burst_stats[0] + burst_stats[1] == total_burst) {
785                 printf(" + %d%% of %d pkts]\n",
786                        100 - burst_percent[0], pktnb_stats[1]);
787                 return;
788         }
789         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
790         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
791         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
792                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
793                 return;
794         }
795         printf(" + %d%% of %d pkts + %d%% of others]\n",
796                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
797 }
798 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
799
/*
 * Print the forwarding statistics accumulated on one port.
 *
 * @param port_id  Index into the global 'ports' array.
 * @param stats    Ethdev counters for the port, already adjusted by the
 *                 caller to cover only the last forwarding run.
 *
 * Two layouts are used: a compact one when no RX/TX queue-stats mapping
 * is enabled on the port, and a wide column-aligned one otherwise
 * (followed by the per-stats-register queue counters).
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		/* Compact layout: no queue-stats mapping on this port. */
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		/* Checksum error counters only make sense for the csum engine. */
		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		/* NOTE(review): assumes ierrors >= imissed; if a PMD reports
		 * imissed > ierrors this difference underflows — confirm. */
		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
			printf("  RX-badcrc:  %-14"PRIu64" RX-badlen:  %-14"PRIu64
			       "RX-error: %-"PRIu64"\n",
			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		/* Wide layout: columns aligned with the per-queue rows below. */
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		/* NOTE(review): same potential ierrors - imissed underflow as above. */
		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
			printf("  RX-badcrc:              %14"PRIu64"    RX-badlen: %14"PRIu64
			       "    RX-error:%"PRIu64"\n",
			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

	/* Display statistics of XON/XOFF pause frames, if any. */
	if ((stats->tx_pause_xon  | stats->rx_pause_xon |
	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
		printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
		       stats->rx_pause_xoff, stats->rx_pause_xon);
		printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
		       stats->tx_pause_xoff, stats->tx_pause_xon);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/* Burst-size histograms, recorded on the port's associated streams. */
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif
	/* Flow-director match/miss counters, only when fdir is configured. */
	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf("  Fdirmiss:%14"PRIu64"    Fdirmatch:%14"PRIu64"\n",
		       stats->fdirmiss,
		       stats->fdirmatch);

	/* Per-stats-register queue counters (only meaningful when mapped). */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
900
/*
 * Print the statistics of a single forwarding stream (one RX queue ->
 * TX queue pairing handled by one lcore).
 *
 * Streams that saw no traffic at all (no RX, no TX, nothing dropped)
 * are silently skipped to keep the output short.
 */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	/* Nothing to report for an idle stream. */
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* Checksum error counters only apply to the csum forwarding engine. */
	if (cur_fwd_eng == &csum_fwd_engine) {
	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/* Optional burst-size histograms for this stream. */
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
929
930 static void
931 flush_fwd_rx_queues(void)
932 {
933         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
934         portid_t  rxp;
935         portid_t port_id;
936         queueid_t rxq;
937         uint16_t  nb_rx;
938         uint16_t  i;
939         uint8_t   j;
940
941         for (j = 0; j < 2; j++) {
942                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
943                         for (rxq = 0; rxq < nb_rxq; rxq++) {
944                                 port_id = fwd_ports_ids[rxp];
945                                 do {
946                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
947                                                 pkts_burst, MAX_PKT_BURST);
948                                         for (i = 0; i < nb_rx; i++)
949                                                 rte_pktmbuf_free(pkts_burst[i]);
950                                 } while (nb_rx > 0);
951                         }
952                 }
953                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
954         }
955 }
956
957 static void
958 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
959 {
960         struct fwd_stream **fsm;
961         streamid_t nb_fs;
962         streamid_t sm_id;
963
964         fsm = &fwd_streams[fc->stream_idx];
965         nb_fs = fc->stream_nb;
966         do {
967                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
968                         (*pkt_fwd)(fsm[sm_id]);
969         } while (! fc->stopped);
970 }
971
972 static int
973 start_pkt_forward_on_core(void *fwd_arg)
974 {
975         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
976                              cur_fwd_config.fwd_eng->packet_fwd);
977         return 0;
978 }
979
980 /*
981  * Run the TXONLY packet forwarding engine to send a single burst of packets.
982  * Used to start communication flows in network loopback test configurations.
983  */
984 static int
985 run_one_txonly_burst_on_core(void *fwd_arg)
986 {
987         struct fwd_lcore *fwd_lc;
988         struct fwd_lcore tmp_lcore;
989
990         fwd_lc = (struct fwd_lcore *) fwd_arg;
991         tmp_lcore = *fwd_lc;
992         tmp_lcore.stopped = 1;
993         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
994         return 0;
995 }
996
997 /*
998  * Launch packet forwarding:
999  *     - Setup per-port forwarding context.
1000  *     - launch logical cores with their forwarding configuration.
1001  */
1002 static void
1003 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1004 {
1005         port_fwd_begin_t port_fwd_begin;
1006         unsigned int i;
1007         unsigned int lc_id;
1008         int diag;
1009
1010         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1011         if (port_fwd_begin != NULL) {
1012                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1013                         (*port_fwd_begin)(fwd_ports_ids[i]);
1014         }
1015         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1016                 lc_id = fwd_lcores_cpuids[i];
1017                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1018                         fwd_lcores[i]->stopped = 0;
1019                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1020                                                      fwd_lcores[i], lc_id);
1021                         if (diag != 0)
1022                                 printf("launch lcore %u failed - diag=%d\n",
1023                                        lc_id, diag);
1024                 }
1025         }
1026 }
1027
1028 /*
1029  * Launch packet forwarding configuration.
1030  */
1031 void
1032 start_packet_forwarding(int with_tx_first)
1033 {
1034         port_fwd_begin_t port_fwd_begin;
1035         port_fwd_end_t  port_fwd_end;
1036         struct rte_port *port;
1037         unsigned int i;
1038         portid_t   pt_id;
1039         streamid_t sm_id;
1040
1041         if (all_ports_started() == 0) {
1042                 printf("Not all ports were started\n");
1043                 return;
1044         }
1045         if (test_done == 0) {
1046                 printf("Packet forwarding already started\n");
1047                 return;
1048         }
1049         if(dcb_test) {
1050                 for (i = 0; i < nb_fwd_ports; i++) {
1051                         pt_id = fwd_ports_ids[i];
1052                         port = &ports[pt_id];
1053                         if (!port->dcb_flag) {
1054                                 printf("In DCB mode, all forwarding ports must "
1055                                        "be configured in this mode.\n");
1056                                 return;
1057                         }
1058                 }
1059                 if (nb_fwd_lcores == 1) {
1060                         printf("In DCB mode,the nb forwarding cores "
1061                                "should be larger than 1.\n");
1062                         return;
1063                 }
1064         }
1065         test_done = 0;
1066
1067         if(!no_flush_rx)
1068                 flush_fwd_rx_queues();
1069
1070         fwd_config_setup();
1071         rxtx_config_display();
1072
1073         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1074                 pt_id = fwd_ports_ids[i];
1075                 port = &ports[pt_id];
1076                 rte_eth_stats_get(pt_id, &port->stats);
1077                 port->tx_dropped = 0;
1078
1079                 map_port_queue_stats_mapping_registers(pt_id, port);
1080         }
1081         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1082                 fwd_streams[sm_id]->rx_packets = 0;
1083                 fwd_streams[sm_id]->tx_packets = 0;
1084                 fwd_streams[sm_id]->fwd_dropped = 0;
1085                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1086                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1087
1088 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1089                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1090                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1091                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1092                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1093 #endif
1094 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1095                 fwd_streams[sm_id]->core_cycles = 0;
1096 #endif
1097         }
1098         if (with_tx_first) {
1099                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1100                 if (port_fwd_begin != NULL) {
1101                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1102                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1103                 }
1104                 launch_packet_forwarding(run_one_txonly_burst_on_core);
1105                 rte_eal_mp_wait_lcore();
1106                 port_fwd_end = tx_only_engine.port_fwd_end;
1107                 if (port_fwd_end != NULL) {
1108                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1109                                 (*port_fwd_end)(fwd_ports_ids[i]);
1110                 }
1111         }
1112         launch_packet_forwarding(start_pkt_forward_on_core);
1113 }
1114
1115 void
1116 stop_packet_forwarding(void)
1117 {
1118         struct rte_eth_stats stats;
1119         struct rte_port *port;
1120         port_fwd_end_t  port_fwd_end;
1121         int i;
1122         portid_t   pt_id;
1123         streamid_t sm_id;
1124         lcoreid_t  lc_id;
1125         uint64_t total_recv;
1126         uint64_t total_xmit;
1127         uint64_t total_rx_dropped;
1128         uint64_t total_tx_dropped;
1129         uint64_t total_rx_nombuf;
1130         uint64_t tx_dropped;
1131         uint64_t rx_bad_ip_csum;
1132         uint64_t rx_bad_l4_csum;
1133 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1134         uint64_t fwd_cycles;
1135 #endif
1136         static const char *acc_stats_border = "+++++++++++++++";
1137
1138         if (all_ports_started() == 0) {
1139                 printf("Not all ports were started\n");
1140                 return;
1141         }
1142         if (test_done) {
1143                 printf("Packet forwarding not started\n");
1144                 return;
1145         }
1146         printf("Telling cores to stop...");
1147         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1148                 fwd_lcores[lc_id]->stopped = 1;
1149         printf("\nWaiting for lcores to finish...\n");
1150         rte_eal_mp_wait_lcore();
1151         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1152         if (port_fwd_end != NULL) {
1153                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1154                         pt_id = fwd_ports_ids[i];
1155                         (*port_fwd_end)(pt_id);
1156                 }
1157         }
1158 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1159         fwd_cycles = 0;
1160 #endif
1161         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1162                 if (cur_fwd_config.nb_fwd_streams >
1163                     cur_fwd_config.nb_fwd_ports) {
1164                         fwd_stream_stats_display(sm_id);
1165                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1166                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1167                 } else {
1168                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1169                                 fwd_streams[sm_id];
1170                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1171                                 fwd_streams[sm_id];
1172                 }
1173                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1174                 tx_dropped = (uint64_t) (tx_dropped +
1175                                          fwd_streams[sm_id]->fwd_dropped);
1176                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1177
1178                 rx_bad_ip_csum =
1179                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1180                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1181                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1182                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1183                                                         rx_bad_ip_csum;
1184
1185                 rx_bad_l4_csum =
1186                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1187                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1188                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1189                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1190                                                         rx_bad_l4_csum;
1191
1192 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1193                 fwd_cycles = (uint64_t) (fwd_cycles +
1194                                          fwd_streams[sm_id]->core_cycles);
1195 #endif
1196         }
1197         total_recv = 0;
1198         total_xmit = 0;
1199         total_rx_dropped = 0;
1200         total_tx_dropped = 0;
1201         total_rx_nombuf  = 0;
1202         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1203                 pt_id = fwd_ports_ids[i];
1204
1205                 port = &ports[pt_id];
1206                 rte_eth_stats_get(pt_id, &stats);
1207                 stats.ipackets -= port->stats.ipackets;
1208                 port->stats.ipackets = 0;
1209                 stats.opackets -= port->stats.opackets;
1210                 port->stats.opackets = 0;
1211                 stats.ibytes   -= port->stats.ibytes;
1212                 port->stats.ibytes = 0;
1213                 stats.obytes   -= port->stats.obytes;
1214                 port->stats.obytes = 0;
1215                 stats.imissed  -= port->stats.imissed;
1216                 port->stats.imissed = 0;
1217                 stats.oerrors  -= port->stats.oerrors;
1218                 port->stats.oerrors = 0;
1219                 stats.rx_nombuf -= port->stats.rx_nombuf;
1220                 port->stats.rx_nombuf = 0;
1221                 stats.fdirmatch -= port->stats.fdirmatch;
1222                 port->stats.rx_nombuf = 0;
1223                 stats.fdirmiss -= port->stats.fdirmiss;
1224                 port->stats.rx_nombuf = 0;
1225
1226                 total_recv += stats.ipackets;
1227                 total_xmit += stats.opackets;
1228                 total_rx_dropped += stats.imissed;
1229                 total_tx_dropped += port->tx_dropped;
1230                 total_rx_nombuf  += stats.rx_nombuf;
1231
1232                 fwd_port_stats_display(pt_id, &stats);
1233         }
1234         printf("\n  %s Accumulated forward statistics for all ports"
1235                "%s\n",
1236                acc_stats_border, acc_stats_border);
1237         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1238                "%-"PRIu64"\n"
1239                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1240                "%-"PRIu64"\n",
1241                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1242                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1243         if (total_rx_nombuf > 0)
1244                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1245         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1246                "%s\n",
1247                acc_stats_border, acc_stats_border);
1248 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1249         if (total_recv > 0)
1250                 printf("\n  CPU cycles/packet=%u (total cycles="
1251                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1252                        (unsigned int)(fwd_cycles / total_recv),
1253                        fwd_cycles, total_recv);
1254 #endif
1255         printf("\nDone.\n");
1256         test_done = 1;
1257 }
1258
1259 void
1260 dev_set_link_up(portid_t pid)
1261 {
1262         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1263                 printf("\nSet link up fail.\n");
1264 }
1265
1266 void
1267 dev_set_link_down(portid_t pid)
1268 {
1269         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1270                 printf("\nSet link down fail.\n");
1271 }
1272
1273 static int
1274 all_ports_started(void)
1275 {
1276         portid_t pi;
1277         struct rte_port *port;
1278
1279         FOREACH_PORT(pi, ports) {
1280                 port = &ports[pi];
1281                 /* Check if there is a port which is not started */
1282                 if (port->port_status != RTE_PORT_STARTED)
1283                         return 0;
1284         }
1285
1286         /* No port is not started */
1287         return 1;
1288 }
1289
1290 int
1291 all_ports_stopped(void)
1292 {
1293         portid_t pi;
1294         struct rte_port *port;
1295
1296         FOREACH_PORT(pi, ports) {
1297                 port = &ports[pi];
1298                 if (port->port_status != RTE_PORT_STOPPED)
1299                         return 0;
1300         }
1301
1302         return 1;
1303 }
1304
1305 int
1306 port_is_started(portid_t port_id)
1307 {
1308         if (port_id_is_invalid(port_id, ENABLED_WARN))
1309                 return 0;
1310
1311         if (ports[port_id].port_status != RTE_PORT_STARTED)
1312                 return 0;
1313
1314         return 1;
1315 }
1316
1317 static int
1318 port_is_closed(portid_t port_id)
1319 {
1320         if (port_id_is_invalid(port_id, ENABLED_WARN))
1321                 return 0;
1322
1323         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1324                 return 0;
1325
1326         return 1;
1327 }
1328
/*
 * Start one port (or all ports when pid == RTE_PORT_ALL).
 *
 * For each selected port this performs, as needed: device configure,
 * TX/RX queue setup (on the NUMA socket configured per ring, when NUMA
 * support is enabled), and rte_eth_dev_start().  Port state is moved
 * STOPPED -> HANDLING -> STARTED via atomic compare-and-set so that
 * concurrent state changes are detected.
 *
 * @return 0 on success, -1 on a configuration/setup failure (the
 *         relevant need_reconfig* flag is re-armed so the next attempt
 *         retries the failed step).
 */
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = 0;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return -1;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return -1;
	}

	if(dcb_config)
		dcb_test = 1;
	FOREACH_PORT(pi, ports) {
		/* Only touch the requested port unless starting all. */
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		port = &ports[pi];
		/* Claim the port: STOPPED -> HANDLING, or skip it. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/* Prefer the per-ring NUMA socket when one
				 * was explicitly configured.
				 */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					/* RX needs an mbuf pool on the ring's
					 * configured socket.
					 */
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							"on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,rxring_numa[pi],
					     &(port->rx_conf),mp);
				}
				else
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,port->socket_id,
					     &(port->rx_conf),
					     mbuf_pool_find(port->socket_id));

				if (diag == 0)
					continue;


				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		/* HANDLING -> STARTED: the port is now live. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	/* NOTE(review): when no port was started (or link checking is
	 * disabled) this prints "Please stop the ports first", which can be
	 * misleading with no_link_check set — confirm intended. */
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
1482
/*
 * Stop one port (or all ports when 'pid' is invalid / RTE_PORT_ALL).
 *
 * Forwarding must be idle.  Each selected port is moved
 * STARTED -> HANDLING -> STOPPED via atomic compare-and-set; ports not
 * currently started are skipped silently.  Stopping any port also
 * leaves DCB test mode.
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}
	printf("Stopping ports...\n");

	FOREACH_PORT(pi, ports) {
		/* A valid 'pid' selects a single port; an invalid one
		 * (e.g. RTE_PORT_ALL) selects every port.
		 */
		if (!port_id_is_invalid(pid, DISABLED_WARN) && pid != pi)
			continue;

		port = &ports[pi];
		/* Claim the port: STARTED -> HANDLING, or skip it. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
1521
1522 void
1523 close_port(portid_t pid)
1524 {
1525         portid_t pi;
1526         struct rte_port *port;
1527
1528         if (test_done == 0) {
1529                 printf("Please stop forwarding first\n");
1530                 return;
1531         }
1532
1533         printf("Closing ports...\n");
1534
1535         FOREACH_PORT(pi, ports) {
1536                 if (!port_id_is_invalid(pid, DISABLED_WARN) && pid != pi)
1537                         continue;
1538
1539                 port = &ports[pi];
1540                 if (rte_atomic16_cmpset(&(port->port_status),
1541                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1542                         printf("Port %d is now not stopped\n", pi);
1543                         continue;
1544                 }
1545
1546                 rte_eth_dev_close(pi);
1547
1548                 if (rte_atomic16_cmpset(&(port->port_status),
1549                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1550                         printf("Port %d can not be set into stopped\n", pi);
1551         }
1552
1553         printf("Done\n");
1554 }
1555
1556 void
1557 attach_port(char *identifier)
1558 {
1559         portid_t i, j, pi = 0;
1560
1561         printf("Attaching a new port...\n");
1562
1563         if (identifier == NULL) {
1564                 printf("Invalid parameters are specified\n");
1565                 return;
1566         }
1567
1568         if (test_done == 0) {
1569                 printf("Please stop forwarding first\n");
1570                 return;
1571         }
1572
1573         if (rte_eth_dev_attach(identifier, &pi))
1574                 return;
1575
1576         ports[pi].enabled = 1;
1577         reconfig(pi, rte_eth_dev_socket_id(pi));
1578         rte_eth_promiscuous_enable(pi);
1579
1580         nb_ports = rte_eth_dev_count();
1581
1582         /* set_default_fwd_ports_config(); */
1583         bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
1584         i = 0;
1585         FOREACH_PORT(j, ports) {
1586                 fwd_ports_ids[i] = j;
1587                 i++;
1588         }
1589         nb_cfg_ports = nb_ports;
1590         nb_fwd_ports++;
1591
1592         ports[pi].port_status = RTE_PORT_STOPPED;
1593
1594         printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1595         printf("Done\n");
1596 }
1597
1598 void
1599 detach_port(uint8_t port_id)
1600 {
1601         portid_t i, pi = 0;
1602         char name[RTE_ETH_NAME_MAX_LEN];
1603
1604         printf("Detaching a port...\n");
1605
1606         if (!port_is_closed(port_id)) {
1607                 printf("Please close port first\n");
1608                 return;
1609         }
1610
1611         rte_eth_promiscuous_disable(port_id);
1612
1613         if (rte_eth_dev_detach(port_id, name))
1614                 return;
1615
1616         ports[port_id].enabled = 0;
1617         nb_ports = rte_eth_dev_count();
1618
1619         /* set_default_fwd_ports_config(); */
1620         bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
1621         i = 0;
1622         FOREACH_PORT(pi, ports) {
1623                 fwd_ports_ids[i] = pi;
1624                 i++;
1625         }
1626         nb_cfg_ports = nb_ports;
1627         nb_fwd_ports--;
1628
1629         printf("Port '%s' is detached. Now total ports is %d\n",
1630                         name, nb_ports);
1631         printf("Done\n");
1632         return;
1633 }
1634
1635 void
1636 pmd_test_exit(void)
1637 {
1638         portid_t pt_id;
1639
1640         FOREACH_PORT(pt_id, ports) {
1641                 printf("Stopping port %d...", pt_id);
1642                 fflush(stdout);
1643                 rte_eth_dev_close(pt_id);
1644                 printf("done\n");
1645         }
1646         printf("bye...\n");
1647 }
1648
/* Signature of a parameter-less test-command handler. */
typedef void (*cmd_func_t)(void);
/* Maps a command name to the handler invoked for it. */
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

/* Number of entries in pmd_test_menu.
 * NOTE(review): pmd_test_menu is not defined in this section of the
 * file; this macro looks like a leftover — confirm it is still used. */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1656
1657 /* Check the link status of all ports in up to 9s, and print them finally */
1658 static void
1659 check_all_ports_link_status(uint32_t port_mask)
1660 {
1661 #define CHECK_INTERVAL 100 /* 100ms */
1662 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1663         uint8_t portid, count, all_ports_up, print_flag = 0;
1664         struct rte_eth_link link;
1665
1666         printf("Checking link statuses...\n");
1667         fflush(stdout);
1668         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1669                 all_ports_up = 1;
1670                 FOREACH_PORT(portid, ports) {
1671                         if ((port_mask & (1 << portid)) == 0)
1672                                 continue;
1673                         memset(&link, 0, sizeof(link));
1674                         rte_eth_link_get_nowait(portid, &link);
1675                         /* print link status if flag set */
1676                         if (print_flag == 1) {
1677                                 if (link.link_status)
1678                                         printf("Port %d Link Up - speed %u "
1679                                                 "Mbps - %s\n", (uint8_t)portid,
1680                                                 (unsigned)link.link_speed,
1681                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1682                                         ("full-duplex") : ("half-duplex\n"));
1683                                 else
1684                                         printf("Port %d Link Down\n",
1685                                                 (uint8_t)portid);
1686                                 continue;
1687                         }
1688                         /* clear all_ports_up flag if any link down */
1689                         if (link.link_status == 0) {
1690                                 all_ports_up = 0;
1691                                 break;
1692                         }
1693                 }
1694                 /* after finally printing all link status, get out */
1695                 if (print_flag == 1)
1696                         break;
1697
1698                 if (all_ports_up == 0) {
1699                         fflush(stdout);
1700                         rte_delay_ms(CHECK_INTERVAL);
1701                 }
1702
1703                 /* set the print_flag if all ports up or timeout */
1704                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1705                         print_flag = 1;
1706                 }
1707         }
1708 }
1709
1710 static int
1711 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1712 {
1713         uint16_t i;
1714         int diag;
1715         uint8_t mapping_found = 0;
1716
1717         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1718                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1719                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1720                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1721                                         tx_queue_stats_mappings[i].queue_id,
1722                                         tx_queue_stats_mappings[i].stats_counter_id);
1723                         if (diag != 0)
1724                                 return diag;
1725                         mapping_found = 1;
1726                 }
1727         }
1728         if (mapping_found)
1729                 port->tx_queue_stats_mapping_enabled = 1;
1730         return 0;
1731 }
1732
1733 static int
1734 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1735 {
1736         uint16_t i;
1737         int diag;
1738         uint8_t mapping_found = 0;
1739
1740         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1741                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1742                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1743                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1744                                         rx_queue_stats_mappings[i].queue_id,
1745                                         rx_queue_stats_mappings[i].stats_counter_id);
1746                         if (diag != 0)
1747                                 return diag;
1748                         mapping_found = 1;
1749                 }
1750         }
1751         if (mapping_found)
1752                 port->rx_queue_stats_mapping_enabled = 1;
1753         return 0;
1754 }
1755
1756 static void
1757 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1758 {
1759         int diag = 0;
1760
1761         diag = set_tx_queue_stats_mapping_registers(pi, port);
1762         if (diag != 0) {
1763                 if (diag == -ENOTSUP) {
1764                         port->tx_queue_stats_mapping_enabled = 0;
1765                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1766                 }
1767                 else
1768                         rte_exit(EXIT_FAILURE,
1769                                         "set_tx_queue_stats_mapping_registers "
1770                                         "failed for port id=%d diag=%d\n",
1771                                         pi, diag);
1772         }
1773
1774         diag = set_rx_queue_stats_mapping_registers(pi, port);
1775         if (diag != 0) {
1776                 if (diag == -ENOTSUP) {
1777                         port->rx_queue_stats_mapping_enabled = 0;
1778                         printf("RX queue stats mapping not supported port id=%d\n", pi);
1779                 }
1780                 else
1781                         rte_exit(EXIT_FAILURE,
1782                                         "set_rx_queue_stats_mapping_registers "
1783                                         "failed for port id=%d diag=%d\n",
1784                                         pi, diag);
1785         }
1786 }
1787
1788 static void
1789 rxtx_port_config(struct rte_port *port)
1790 {
1791         port->rx_conf = port->dev_info.default_rxconf;
1792         port->tx_conf = port->dev_info.default_txconf;
1793
1794         /* Check if any RX/TX parameters have been passed */
1795         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1796                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1797
1798         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1799                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1800
1801         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1802                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1803
1804         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1805                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1806
1807         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1808                 port->rx_conf.rx_drop_en = rx_drop_en;
1809
1810         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1811                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1812
1813         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1814                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1815
1816         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1817                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1818
1819         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1820                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1821
1822         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1823                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1824
1825         if (txq_flags != RTE_PMD_PARAM_UNSET)
1826                 port->tx_conf.txq_flags = txq_flags;
1827 }
1828
1829 void
1830 init_port_config(void)
1831 {
1832         portid_t pid;
1833         struct rte_port *port;
1834
1835         FOREACH_PORT(pid, ports) {
1836                 port = &ports[pid];
1837                 port->dev_conf.rxmode = rx_mode;
1838                 port->dev_conf.fdir_conf = fdir_conf;
1839                 if (nb_rxq > 1) {
1840                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1841                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1842                 } else {
1843                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1844                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1845                 }
1846
1847                 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1848                         if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1849                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1850                         else
1851                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1852                 }
1853
1854                 if (port->dev_info.max_vfs != 0) {
1855                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1856                                 port->dev_conf.rxmode.mq_mode =
1857                                         ETH_MQ_RX_VMDQ_RSS;
1858                         else
1859                                 port->dev_conf.rxmode.mq_mode =
1860                                         ETH_MQ_RX_NONE;
1861
1862                         port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1863                 }
1864
1865                 rxtx_port_config(port);
1866
1867                 rte_eth_macaddr_get(pid, &port->eth_addr);
1868
1869                 map_port_queue_stats_mapping_registers(pid, port);
1870 #ifdef RTE_NIC_BYPASS
1871                 rte_eth_dev_bypass_init(pid);
1872 #endif
1873         }
1874 }
1875
/* Pool of 32 VLAN tags (0..31) used when building the DCB/VMDq
 * configuration and the per-port VLAN filter table. */
const uint16_t vlan_tags[] = {
	 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
1882
1883 static  int
1884 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1885 {
1886         uint8_t i;
1887
1888         /*
1889          * Builds up the correct configuration for dcb+vt based on the vlan tags array
1890          * given above, and the number of traffic classes available for use.
1891          */
1892         if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1893                 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1894                 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1895
1896                 /* VMDQ+DCB RX and TX configrations */
1897                 vmdq_rx_conf.enable_default_pool = 0;
1898                 vmdq_rx_conf.default_pool = 0;
1899                 vmdq_rx_conf.nb_queue_pools =
1900                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1901                 vmdq_tx_conf.nb_queue_pools =
1902                         (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1903
1904                 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1905                 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1906                         vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1907                         vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1908                 }
1909                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1910                         vmdq_rx_conf.dcb_queue[i] = i;
1911                         vmdq_tx_conf.dcb_queue[i] = i;
1912                 }
1913
1914                 /*set DCB mode of RX and TX of multiple queues*/
1915                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1916                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1917                 if (dcb_conf->pfc_en)
1918                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1919                 else
1920                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1921
1922                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1923                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1924                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1925                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1926         }
1927         else {
1928                 struct rte_eth_dcb_rx_conf rx_conf;
1929                 struct rte_eth_dcb_tx_conf tx_conf;
1930
1931                 /* queue mapping configuration of DCB RX and TX */
1932                 if (dcb_conf->num_tcs == ETH_4_TCS)
1933                         dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1934                 else
1935                         dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1936
1937                 rx_conf.nb_tcs = dcb_conf->num_tcs;
1938                 tx_conf.nb_tcs = dcb_conf->num_tcs;
1939
1940                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1941                         rx_conf.dcb_queue[i] = i;
1942                         tx_conf.dcb_queue[i] = i;
1943                 }
1944                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1945                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1946                 if (dcb_conf->pfc_en)
1947                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1948                 else
1949                         eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1950
1951                 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1952                                 sizeof(struct rte_eth_dcb_rx_conf)));
1953                 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1954                                 sizeof(struct rte_eth_dcb_tx_conf)));
1955         }
1956
1957         return 0;
1958 }
1959
1960 int
1961 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1962 {
1963         struct rte_eth_conf port_conf;
1964         struct rte_port *rte_port;
1965         int retval;
1966         uint16_t nb_vlan;
1967         uint16_t i;
1968
1969         /* rxq and txq configuration in dcb mode */
1970         nb_rxq = 128;
1971         nb_txq = 128;
1972         rx_free_thresh = 64;
1973
1974         memset(&port_conf,0,sizeof(struct rte_eth_conf));
1975         /* Enter DCB configuration status */
1976         dcb_config = 1;
1977
1978         nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1979         /*set configuration of DCB in vt mode and DCB in non-vt mode*/
1980         retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1981         if (retval < 0)
1982                 return retval;
1983
1984         rte_port = &ports[pid];
1985         memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1986
1987         rxtx_port_config(rte_port);
1988         /* VLAN filter */
1989         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1990         for (i = 0; i < nb_vlan; i++){
1991                 rx_vft_set(pid, vlan_tags[i], 1);
1992         }
1993
1994         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1995         map_port_queue_stats_mapping_registers(pid, rte_port);
1996
1997         rte_port->dcb_flag = 1;
1998
1999         return 0;
2000 }
2001
2002 int
2003 main(int argc, char** argv)
2004 {
2005         int  diag;
2006         uint8_t port_id;
2007
2008         diag = rte_eal_init(argc, argv);
2009         if (diag < 0)
2010                 rte_panic("Cannot init EAL\n");
2011
2012         nb_ports = (portid_t) rte_eth_dev_count();
2013         if (nb_ports == 0)
2014                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2015
2016         set_def_fwd_config();
2017         if (nb_lcores == 0)
2018                 rte_panic("Empty set of forwarding logical cores - check the "
2019                           "core mask supplied in the command parameters\n");
2020
2021         argc -= diag;
2022         argv += diag;
2023         if (argc > 1)
2024                 launch_args_parse(argc, argv);
2025
2026         if (nb_rxq > nb_txq)
2027                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2028                        "but nb_txq=%d will prevent to fully test it.\n",
2029                        nb_rxq, nb_txq);
2030
2031         init_config();
2032         if (start_port(RTE_PORT_ALL) != 0)
2033                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2034
2035         /* set all ports to promiscuous mode by default */
2036         FOREACH_PORT(port_id, ports)
2037                 rte_eth_promiscuous_enable(port_id);
2038
2039 #ifdef RTE_LIBRTE_CMDLINE
2040         if (interactive == 1) {
2041                 if (auto_start) {
2042                         printf("Start automatic packet forwarding\n");
2043                         start_packet_forwarding(0);
2044                 }
2045                 prompt();
2046         } else
2047 #endif
2048         {
2049                 char c;
2050                 int rc;
2051
2052                 printf("No commandline core given, start packet forwarding\n");
2053                 start_packet_forwarding(0);
2054                 printf("Press enter to exit\n");
2055                 rc = read(0, &c, 1);
2056                 if (rc < 0)
2057                         return 1;
2058         }
2059
2060         return 0;
2061 }