app/testpmd: fix crash when mempool allocation fails
[dpdk.git] app/test-pmd/testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
68 #include <rte_mbuf.h>
69 #include <rte_interrupts.h>
70 #include <rte_pci.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
73 #include <rte_dev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78 #ifdef RTE_LIBRTE_PDUMP
79 #include <rte_pdump.h>
80 #endif
81
82 #include "testpmd.h"
83
84 uint16_t verbose_level = 0; /**< Silent by default. */
85
86 /* use master core for command line? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
89
90 /*
91  * NUMA support configuration.
92  * When set, the NUMA support attempts to dispatch the allocation of the
93  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
94  * probed ports among the CPU sockets 0 and 1.
95  * Otherwise, all memory is allocated from CPU socket 0.
96  */
97 uint8_t numa_support = 0; /**< No numa support by default */
98
99 /*
100  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
101  * not configured.
102  */
103 uint8_t socket_num = UMA_NO_CONFIG;
104
105 /*
106  * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
107  */
108 uint8_t mp_anon = 0;
109
110 /*
111  * Record the Ethernet address of peer target ports to which packets are
112  * forwarded.
113  * Must be instantiated with the Ethernet addresses of peer traffic generator
114  * ports.
115  */
116 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
117 portid_t nb_peer_eth_addrs = 0;
118
119 /*
120  * Probed Target Environment.
121  */
122 struct rte_port *ports;        /**< For all probed ethernet ports. */
123 portid_t nb_ports;             /**< Number of probed ethernet ports. */
124 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
125 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
126
127 /*
128  * Test Forwarding Configuration.
129  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
130  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
131  */
132 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
133 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
134 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
135 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
136
137 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
138 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
139
140 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
141 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
142
143 /*
144  * Forwarding engines.
145  */
146 struct fwd_engine * fwd_engines[] = {
147         &io_fwd_engine,
148         &mac_fwd_engine,
149         &mac_swap_engine,
150         &flow_gen_engine,
151         &rx_only_engine,
152         &tx_only_engine,
153         &csum_fwd_engine,
154         &icmp_echo_engine,
155 #ifdef RTE_LIBRTE_IEEE1588
156         &ieee1588_fwd_engine,
157 #endif
158         NULL,
159 };
160
161 struct fwd_config cur_fwd_config;
162 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
163 uint32_t retry_enabled;
164 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
165 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
166
167 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
168 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
169                                       * specified on command-line. */
170
171 /*
172  * Configuration of packet segments used by the "txonly" processing engine.
173  */
174 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
175 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
176         TXONLY_DEF_PACKET_LEN,
177 };
178 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
179
180 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
181 /**< Split policy for packets to TX. */
182
183 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
184 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
185
186 /* Whether the current configuration is in DCB mode; 0 means it is not */
187 uint8_t dcb_config = 0;
188
189 /* Whether DCB is in testing status */
190 uint8_t dcb_test = 0;
191
192 /*
193  * Configurable number of RX/TX queues.
194  */
195 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
196 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
197
198 /*
199  * Configurable number of RX/TX ring descriptors.
200  */
201 #define RTE_TEST_RX_DESC_DEFAULT 128
202 #define RTE_TEST_TX_DESC_DEFAULT 512
203 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
204 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
205
206 #define RTE_PMD_PARAM_UNSET -1
207 /*
208  * Configurable values of RX and TX ring threshold registers.
209  */
210
211 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
214
215 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
216 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
217 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
218
219 /*
220  * Configurable value of RX free threshold.
221  */
222 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
223
224 /*
225  * Configurable value of RX drop enable.
226  */
227 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
228
229 /*
230  * Configurable value of TX free threshold.
231  */
232 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
233
234 /*
235  * Configurable value of TX RS bit threshold.
236  */
237 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
238
239 /*
240  * Configurable value of TX queue flags.
241  */
242 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
243
244 /*
245  * Receive Side Scaling (RSS) configuration.
246  */
247 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
248
249 /*
250  * Port topology configuration
251  */
252 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
253
254 /*
255  * Avoid flushing all the RX streams before forwarding starts.
256  */
257 uint8_t no_flush_rx = 0; /* flush by default */
258
259 /*
260  * Avoid checking link status when starting/stopping a port.
261  */
262 uint8_t no_link_check = 0; /* check by default */
263
264 /*
265  * NIC bypass mode configuration options.
266  */
267 #ifdef RTE_NIC_BYPASS
268
269 /* The NIC bypass watchdog timeout. */
270 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
271
272 #endif
273
274 /* default period is 1 second */
275 static uint64_t timer_period = 1;
276
277 /*
278  * Ethernet device configuration.
279  */
280 struct rte_eth_rxmode rx_mode = {
281         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
282         .split_hdr_size = 0,
283         .header_split   = 0, /**< Header Split disabled. */
284         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
285         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
286         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
287         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
288         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
289         .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
290 };
291
292 struct rte_fdir_conf fdir_conf = {
293         .mode = RTE_FDIR_MODE_NONE,
294         .pballoc = RTE_FDIR_PBALLOC_64K,
295         .status = RTE_FDIR_REPORT_STATUS,
296         .mask = {
297                 .vlan_tci_mask = 0x0,
298                 .ipv4_mask     = {
299                         .src_ip = 0xFFFFFFFF,
300                         .dst_ip = 0xFFFFFFFF,
301                 },
302                 .ipv6_mask     = {
303                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
304                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
305                 },
306                 .src_port_mask = 0xFFFF,
307                 .dst_port_mask = 0xFFFF,
308                 .mac_addr_byte_mask = 0xFF,
309                 .tunnel_type_mask = 1,
310                 .tunnel_id_mask = 0xFFFFFFFF,
311         },
312         .drop_queue = 127,
313 };
314
315 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
316
317 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
318 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
319
320 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
321 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
322
323 uint16_t nb_tx_queue_stats_mappings = 0;
324 uint16_t nb_rx_queue_stats_mappings = 0;
325
326 unsigned max_socket = 0;
327
328 /* Forward function declarations */
329 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
330 static void check_all_ports_link_status(uint32_t port_mask);
331
332 /*
333  * Check if all the ports are started.
334  * If yes, return positive value. If not, return zero.
335  */
336 static int all_ports_started(void);
337
338 /*
339  * Find next enabled port
340  */
341 portid_t
342 find_next_port(portid_t p, struct rte_port *ports, int size)
343 {
344         if (ports == NULL)
345                 rte_exit(-EINVAL, "failed to find a next port id\n");
346
347         while ((p < size) && (ports[p].enabled == 0))
348                 p++;
349         return p;
350 }
351
352 /*
353  * Setup default configuration.
354  */
355 static void
356 set_default_fwd_lcores_config(void)
357 {
358         unsigned int i;
359         unsigned int nb_lc;
360         unsigned int sock_num;
361
362         nb_lc = 0;
363         for (i = 0; i < RTE_MAX_LCORE; i++) {
364                 sock_num = rte_lcore_to_socket_id(i) + 1;
365                 if (sock_num > max_socket) {
366                         if (sock_num > RTE_MAX_NUMA_NODES)
367                                 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
368                         max_socket = sock_num;
369                 }
370                 if (!rte_lcore_is_enabled(i))
371                         continue;
372                 if (i == rte_get_master_lcore())
373                         continue;
374                 fwd_lcores_cpuids[nb_lc++] = i;
375         }
376         nb_lcores = (lcoreid_t) nb_lc;
377         nb_cfg_lcores = nb_lcores;
378         nb_fwd_lcores = 1;
379 }
380
381 static void
382 set_def_peer_eth_addrs(void)
383 {
384         portid_t i;
385
386         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
387                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
388                 peer_eth_addrs[i].addr_bytes[5] = i;
389         }
390 }
391
392 static void
393 set_default_fwd_ports_config(void)
394 {
395         portid_t pt_id;
396
397         for (pt_id = 0; pt_id < nb_ports; pt_id++)
398                 fwd_ports_ids[pt_id] = pt_id;
399
400         nb_cfg_ports = nb_ports;
401         nb_fwd_ports = nb_ports;
402 }
403
404 void
405 set_def_fwd_config(void)
406 {
407         set_default_fwd_lcores_config();
408         set_def_peer_eth_addrs();
409         set_default_fwd_ports_config();
410 }
411
412 /*
413  * Allocation of an mbuf pool for a given socket, done once at init time.
414  */
415 static void
416 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
417                  unsigned int socket_id)
418 {
419         char pool_name[RTE_MEMPOOL_NAMESIZE];
420         struct rte_mempool *rte_mp = NULL;
421         uint32_t mb_size;
422
423         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
424         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
425
426         RTE_LOG(INFO, USER1,
427                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
428                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
429
430 #ifdef RTE_LIBRTE_PMD_XENVIRT
431         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
432                 (unsigned) mb_mempool_cache,
433                 sizeof(struct rte_pktmbuf_pool_private),
434                 rte_pktmbuf_pool_init, NULL,
435                 rte_pktmbuf_init, NULL,
436                 socket_id, 0);
437 #endif
438
439         /* if the former XEN allocation failed, fall back to normal allocation */
440         if (rte_mp == NULL) {
441                 if (mp_anon != 0) {
442                         rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
443                                 mb_size, (unsigned) mb_mempool_cache,
444                                 sizeof(struct rte_pktmbuf_pool_private),
445                                 socket_id, 0);
446                         if (rte_mp == NULL)
447                                 goto err;
448
449                         if (rte_mempool_populate_anon(rte_mp) == 0) {
450                                 rte_mempool_free(rte_mp);
451                                 rte_mp = NULL;
452                                 goto err;
453                         }
454                         rte_pktmbuf_pool_init(rte_mp, NULL);
455                         rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
456                 } else {
457                         /* wrapper to rte_mempool_create() */
458                         rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
459                                 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
460                 }
461         }
462
463 err:
464         if (rte_mp == NULL) {
465                 rte_exit(EXIT_FAILURE,
466                         "Creation of mbuf pool for socket %u failed: %s\n",
467                         socket_id, rte_strerror(rte_errno));
468         } else if (verbose_level > 0) {
469                 rte_mempool_dump(stdout, rte_mp);
470         }
471 }
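/*
 * Note on the error handling above: every allocation path (XEN, anonymous
 * mapping and the rte_pktmbuf_pool_create() wrapper) leaves rte_mp as NULL
 * on failure, so the err label terminates testpmd with a message built from
 * rte_strerror(rte_errno) instead of letting a NULL pool be used later on.
 */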
472
473 /*
474  * Check whether the given socket id is valid in NUMA mode;
475  * if valid, return 0, otherwise return -1.
476  */
477 static int
478 check_socket_id(const unsigned int socket_id)
479 {
480         static int warning_once = 0;
481
482         if (socket_id >= max_socket) {
483                 if (!warning_once && numa_support)
484                         printf("Warning: NUMA should be configured manually by"
485                                " using --port-numa-config and"
486                                " --ring-numa-config parameters along with"
487                                " --numa.\n");
488                 warning_once = 1;
489                 return -1;
490         }
491         return 0;
492 }
493
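/*
 * init_config(): allocate the per-lcore forwarding contexts, create the
 * mbuf pool(s) according to the NUMA configuration, flag every probed port
 * for (re)configuration, then set up the forwarding streams.
 */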
494 static void
495 init_config(void)
496 {
497         portid_t pid;
498         struct rte_port *port;
499         struct rte_mempool *mbp;
500         unsigned int nb_mbuf_per_pool;
501         lcoreid_t  lc_id;
502         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
503
504         memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
505         /* Configuration of logical cores. */
506         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
507                                 sizeof(struct fwd_lcore *) * nb_lcores,
508                                 RTE_CACHE_LINE_SIZE);
509         if (fwd_lcores == NULL) {
510                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
511                                                         "failed\n", nb_lcores);
512         }
513         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
514                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
515                                                sizeof(struct fwd_lcore),
516                                                RTE_CACHE_LINE_SIZE);
517                 if (fwd_lcores[lc_id] == NULL) {
518                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
519                                                                 "failed\n");
520                 }
521                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
522         }
523
524         /*
525          * Create pools of mbuf.
526          * If NUMA support is disabled, create a single pool of mbuf in
527          * socket 0 memory by default.
528          * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
529          *
530          * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd
531          * and nb_txd can be reconfigured at run time.
532          */
533         if (param_total_num_mbufs)
534                 nb_mbuf_per_pool = param_total_num_mbufs;
535         else {
536                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
537                                 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
538
539                 if (!numa_support)
540                         nb_mbuf_per_pool =
541                                 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
542         }
543
544         if (!numa_support) {
545                 if (socket_num == UMA_NO_CONFIG)
546                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
547                 else
548                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
549                                                  socket_num);
550         }
551
552         FOREACH_PORT(pid, ports) {
553                 port = &ports[pid];
554                 rte_eth_dev_info_get(pid, &port->dev_info);
555
556                 if (numa_support) {
557                         if (port_numa[pid] != NUMA_NO_CONFIG)
558                                 port_per_socket[port_numa[pid]]++;
559                         else {
560                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
561
562                                 /* if socket_id is invalid, set to 0 */
563                                 if (check_socket_id(socket_id) < 0)
564                                         socket_id = 0;
565                                 port_per_socket[socket_id]++;
566                         }
567                 }
568
569                 /* set flag to initialize port/queue */
570                 port->need_reconfig = 1;
571                 port->need_reconfig_queues = 1;
572         }
573
574         if (numa_support) {
575                 uint8_t i;
576                 unsigned int nb_mbuf;
577
578                 if (param_total_num_mbufs)
579                         nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
580
581                 for (i = 0; i < max_socket; i++) {
582                         nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
583                         if (nb_mbuf)
584                                 mbuf_pool_create(mbuf_data_size,
585                                                 nb_mbuf,i);
586                 }
587         }
588         init_port_config();
589
590         /*
591          * Record which mbuf pool each logical core should use, if needed.
592          */
593         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
594                 mbp = mbuf_pool_find(
595                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
596
597                 if (mbp == NULL)
598                         mbp = mbuf_pool_find(0);
599                 fwd_lcores[lc_id]->mbp = mbp;
600         }
601
602         /* Configuration of packet forwarding streams. */
603         if (init_fwd_streams() < 0)
604                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
605
606         fwd_config_setup();
607 }
608
609
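/*
 * reconfig(): refresh the device info of new_port_id, flag the port and its
 * queues for reconfiguration, record its socket id and re-apply the port
 * configuration.
 */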
610 void
611 reconfig(portid_t new_port_id, unsigned socket_id)
612 {
613         struct rte_port *port;
614
615         /* Reconfiguration of Ethernet ports. */
616         port = &ports[new_port_id];
617         rte_eth_dev_info_get(new_port_id, &port->dev_info);
618
619         /* set flag to initialize port/queue */
620         port->need_reconfig = 1;
621         port->need_reconfig_queues = 1;
622         port->socket_id = socket_id;
623
624         init_port_config();
625 }
626
627
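/*
 * init_fwd_streams(): check nb_rxq/nb_txq against each device's limits,
 * assign a socket id to every port (NUMA or UMA mode), then (re)allocate
 * nb_ports * RTE_MAX(nb_rxq, nb_txq) forwarding streams.
 */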
628 int
629 init_fwd_streams(void)
630 {
631         portid_t pid;
632         struct rte_port *port;
633         streamid_t sm_id, nb_fwd_streams_new;
634         queueid_t q;
635
636         /* set each port's socket id according to NUMA support or not */
637         FOREACH_PORT(pid, ports) {
638                 port = &ports[pid];
639                 if (nb_rxq > port->dev_info.max_rx_queues) {
640                         printf("Fail: nb_rxq(%d) is greater than "
641                                 "max_rx_queues(%d)\n", nb_rxq,
642                                 port->dev_info.max_rx_queues);
643                         return -1;
644                 }
645                 if (nb_txq > port->dev_info.max_tx_queues) {
646                         printf("Fail: nb_txq(%d) is greater than "
647                                 "max_tx_queues(%d)\n", nb_txq,
648                                 port->dev_info.max_tx_queues);
649                         return -1;
650                 }
651                 if (numa_support) {
652                         if (port_numa[pid] != NUMA_NO_CONFIG)
653                                 port->socket_id = port_numa[pid];
654                         else {
655                                 port->socket_id = rte_eth_dev_socket_id(pid);
656
657                                 /* if socket_id is invalid, set to 0 */
658                                 if (check_socket_id(port->socket_id) < 0)
659                                         port->socket_id = 0;
660                         }
661                 }
662                 else {
663                         if (socket_num == UMA_NO_CONFIG)
664                                 port->socket_id = 0;
665                         else
666                                 port->socket_id = socket_num;
667                 }
668         }
669
670         q = RTE_MAX(nb_rxq, nb_txq);
671         if (q == 0) {
672                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
673                 return -1;
674         }
675         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
676         if (nb_fwd_streams_new == nb_fwd_streams)
677                 return 0;
678         /* clear the old */
679         if (fwd_streams != NULL) {
680                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
681                         if (fwd_streams[sm_id] == NULL)
682                                 continue;
683                         rte_free(fwd_streams[sm_id]);
684                         fwd_streams[sm_id] = NULL;
685                 }
686                 rte_free(fwd_streams);
687                 fwd_streams = NULL;
688         }
689
690         /* init new */
691         nb_fwd_streams = nb_fwd_streams_new;
692         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
693                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
694         if (fwd_streams == NULL)
695                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
696                                                 "failed\n", nb_fwd_streams);
697
698         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
699                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
700                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
701                 if (fwd_streams[sm_id] == NULL)
702                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
703                                                                 " failed\n");
704         }
705
706         return 0;
707 }
708
709 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
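/*
 * Print a short distribution of the burst sizes observed for rx_tx: the
 * total number of bursts and the share of the most frequent burst sizes.
 */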
710 static void
711 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
712 {
713         unsigned int total_burst;
714         unsigned int nb_burst;
715         unsigned int burst_stats[3];
716         uint16_t pktnb_stats[3];
717         uint16_t nb_pkt;
718         int burst_percent[3];
719
720         /*
721          * First compute the total number of packet bursts and the
722          * two highest numbers of bursts of the same number of packets.
723          */
724         total_burst = 0;
725         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
726         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
727         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
728                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
729                 if (nb_burst == 0)
730                         continue;
731                 total_burst += nb_burst;
732                 if (nb_burst > burst_stats[0]) {
733                         burst_stats[1] = burst_stats[0];
734                         pktnb_stats[1] = pktnb_stats[0];
735                         burst_stats[0] = nb_burst;
736                         pktnb_stats[0] = nb_pkt;
737                 }
738         }
739         if (total_burst == 0)
740                 return;
741         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
742         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
743                burst_percent[0], (int) pktnb_stats[0]);
744         if (burst_stats[0] == total_burst) {
745                 printf("]\n");
746                 return;
747         }
748         if (burst_stats[0] + burst_stats[1] == total_burst) {
749                 printf(" + %d%% of %d pkts]\n",
750                        100 - burst_percent[0], pktnb_stats[1]);
751                 return;
752         }
753         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
754         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
755         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
756                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
757                 return;
758         }
759         printf(" + %d%% of %d pkts + %d%% of others]\n",
760                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
761 }
762 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
763
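/*
 * Display the forwarding statistics accumulated on one port; the layout
 * differs depending on whether RX/TX queue stats mapping is enabled.
 */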
764 static void
765 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
766 {
767         struct rte_port *port;
768         uint8_t i;
769
770         static const char *fwd_stats_border = "----------------------";
771
772         port = &ports[port_id];
773         printf("\n  %s Forward statistics for port %-2d %s\n",
774                fwd_stats_border, port_id, fwd_stats_border);
775
776         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
777                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
778                        "%-"PRIu64"\n",
779                        stats->ipackets, stats->imissed,
780                        (uint64_t) (stats->ipackets + stats->imissed));
781
782                 if (cur_fwd_eng == &csum_fwd_engine)
783                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
784                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
785                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
786                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
787                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
788                 }
789
790                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
791                        "%-"PRIu64"\n",
792                        stats->opackets, port->tx_dropped,
793                        (uint64_t) (stats->opackets + port->tx_dropped));
794         }
795         else {
796                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
797                        "%14"PRIu64"\n",
798                        stats->ipackets, stats->imissed,
799                        (uint64_t) (stats->ipackets + stats->imissed));
800
801                 if (cur_fwd_eng == &csum_fwd_engine)
802                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
803                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
804                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
805                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
806                         printf("  RX-nombufs:             %14"PRIu64"\n",
807                                stats->rx_nombuf);
808                 }
809
810                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
811                        "%14"PRIu64"\n",
812                        stats->opackets, port->tx_dropped,
813                        (uint64_t) (stats->opackets + port->tx_dropped));
814         }
815
816 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
817         if (port->rx_stream)
818                 pkt_burst_stats_display("RX",
819                         &port->rx_stream->rx_burst_stats);
820         if (port->tx_stream)
821                 pkt_burst_stats_display("TX",
822                         &port->tx_stream->tx_burst_stats);
823 #endif
824
825         if (port->rx_queue_stats_mapping_enabled) {
826                 printf("\n");
827                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
828                         printf("  Stats reg %2d RX-packets:%14"PRIu64
829                                "     RX-errors:%14"PRIu64
830                                "    RX-bytes:%14"PRIu64"\n",
831                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
832                 }
833                 printf("\n");
834         }
835         if (port->tx_queue_stats_mapping_enabled) {
836                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
837                         printf("  Stats reg %2d TX-packets:%14"PRIu64
838                                "                                 TX-bytes:%14"PRIu64"\n",
839                                i, stats->q_opackets[i], stats->q_obytes[i]);
840                 }
841         }
842
843         printf("  %s--------------------------------%s\n",
844                fwd_stats_border, fwd_stats_border);
845 }
846
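/*
 * Display per-stream forwarding statistics (RX/TX counts, drops and, in
 * csum mode, checksum errors) for streams that actually handled packets.
 */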
847 static void
848 fwd_stream_stats_display(streamid_t stream_id)
849 {
850         struct fwd_stream *fs;
851         static const char *fwd_top_stats_border = "-------";
852
853         fs = fwd_streams[stream_id];
854         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
855             (fs->fwd_dropped == 0))
856                 return;
857         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
858                "TX Port=%2d/Queue=%2d %s\n",
859                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
860                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
861         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
862                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
863
864         /* if checksum mode */
865         if (cur_fwd_eng == &csum_fwd_engine) {
866                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
867                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
868         }
869
870 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
871         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
872         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
873 #endif
874 }
875
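/*
 * Drain the RX queues of all forwarding ports before a run: receive and
 * free pending mbufs in two passes, bounding each queue by the timer period
 * (1 second by default) and waiting 10 ms between the two passes.
 */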
876 static void
877 flush_fwd_rx_queues(void)
878 {
879         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
880         portid_t  rxp;
881         portid_t port_id;
882         queueid_t rxq;
883         uint16_t  nb_rx;
884         uint16_t  i;
885         uint8_t   j;
886         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
887
888         /* convert to number of cycles */
889         timer_period *= rte_get_timer_hz();
890
891         for (j = 0; j < 2; j++) {
892                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
893                         for (rxq = 0; rxq < nb_rxq; rxq++) {
894                                 port_id = fwd_ports_ids[rxp];
895                                 /**
896                                 * testpmd can get stuck in the do-while loop below
897                                 * if rte_eth_rx_burst() always returns nonzero
898                                 * packets, so a timer is added to exit this loop
899                                 * after the 1-second timer expires.
900                                 */
901                                 prev_tsc = rte_rdtsc();
902                                 do {
903                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
904                                                 pkts_burst, MAX_PKT_BURST);
905                                         for (i = 0; i < nb_rx; i++)
906                                                 rte_pktmbuf_free(pkts_burst[i]);
907
908                                         cur_tsc = rte_rdtsc();
909                                         diff_tsc = cur_tsc - prev_tsc;
910                                         timer_tsc += diff_tsc;
911                                 } while ((nb_rx > 0) &&
912                                         (timer_tsc < timer_period));
913                                 timer_tsc = 0;
914                         }
915                 }
916                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
917         }
918 }
919
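/*
 * Main forwarding loop of one logical core: apply the packet forwarding
 * function to every stream assigned to this lcore until fc->stopped is set.
 */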
920 static void
921 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
922 {
923         struct fwd_stream **fsm;
924         streamid_t nb_fs;
925         streamid_t sm_id;
926
927         fsm = &fwd_streams[fc->stream_idx];
928         nb_fs = fc->stream_nb;
929         do {
930                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
931                         (*pkt_fwd)(fsm[sm_id]);
932         } while (! fc->stopped);
933 }
934
935 static int
936 start_pkt_forward_on_core(void *fwd_arg)
937 {
938         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
939                              cur_fwd_config.fwd_eng->packet_fwd);
940         return 0;
941 }
942
943 /*
944  * Run the TXONLY packet forwarding engine to send a single burst of packets.
945  * Used to start communication flows in network loopback test configurations.
946  */
947 static int
948 run_one_txonly_burst_on_core(void *fwd_arg)
949 {
950         struct fwd_lcore *fwd_lc;
951         struct fwd_lcore tmp_lcore;
952
953         fwd_lc = (struct fwd_lcore *) fwd_arg;
954         tmp_lcore = *fwd_lc;
955         tmp_lcore.stopped = 1;
956         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
957         return 0;
958 }
959
960 /*
961  * Launch packet forwarding:
962  *     - Set up the per-port forwarding context.
963  *     - Launch logical cores with their forwarding configuration.
964  */
965 static void
966 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
967 {
968         port_fwd_begin_t port_fwd_begin;
969         unsigned int i;
970         unsigned int lc_id;
971         int diag;
972
973         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
974         if (port_fwd_begin != NULL) {
975                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
976                         (*port_fwd_begin)(fwd_ports_ids[i]);
977         }
978         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
979                 lc_id = fwd_lcores_cpuids[i];
980                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
981                         fwd_lcores[i]->stopped = 0;
982                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
983                                                      fwd_lcores[i], lc_id);
984                         if (diag != 0)
985                                 printf("launch lcore %u failed - diag=%d\n",
986                                        lc_id, diag);
987                 }
988         }
989 }
990
991 /*
992  * Launch packet forwarding configuration.
993  */
994 void
995 start_packet_forwarding(int with_tx_first)
996 {
997         port_fwd_begin_t port_fwd_begin;
998         port_fwd_end_t  port_fwd_end;
999         struct rte_port *port;
1000         unsigned int i;
1001         portid_t   pt_id;
1002         streamid_t sm_id;
1003
1004         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1005                 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1006
1007         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1008                 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1009
1010         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1011                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1012                 (!nb_rxq || !nb_txq))
1013                 rte_exit(EXIT_FAILURE,
1014                         "Either rxq or txq are 0, cannot use %s fwd mode\n",
1015                         cur_fwd_eng->fwd_mode_name);
1016
1017         if (all_ports_started() == 0) {
1018                 printf("Not all ports were started\n");
1019                 return;
1020         }
1021         if (test_done == 0) {
1022                 printf("Packet forwarding already started\n");
1023                 return;
1024         }
1025
1026         if (init_fwd_streams() < 0) {
1027                 printf("Fail from init_fwd_streams()\n");
1028                 return;
1029         }
1030
1031         if(dcb_test) {
1032                 for (i = 0; i < nb_fwd_ports; i++) {
1033                         pt_id = fwd_ports_ids[i];
1034                         port = &ports[pt_id];
1035                         if (!port->dcb_flag) {
1036                                 printf("In DCB mode, all forwarding ports must "
1037                                        "be configured in this mode.\n");
1038                                 return;
1039                         }
1040                 }
1041                 if (nb_fwd_lcores == 1) {
1042                         printf("In DCB mode, the number of forwarding cores "
1043                                "should be larger than 1.\n");
1044                         return;
1045                 }
1046         }
1047         test_done = 0;
1048
1049         if(!no_flush_rx)
1050                 flush_fwd_rx_queues();
1051
1052         fwd_config_setup();
1053         pkt_fwd_config_display(&cur_fwd_config);
1054         rxtx_config_display();
1055
1056         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1057                 pt_id = fwd_ports_ids[i];
1058                 port = &ports[pt_id];
1059                 rte_eth_stats_get(pt_id, &port->stats);
1060                 port->tx_dropped = 0;
1061
1062                 map_port_queue_stats_mapping_registers(pt_id, port);
1063         }
1064         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1065                 fwd_streams[sm_id]->rx_packets = 0;
1066                 fwd_streams[sm_id]->tx_packets = 0;
1067                 fwd_streams[sm_id]->fwd_dropped = 0;
1068                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1069                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1070
1071 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1072                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1073                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1074                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1075                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1076 #endif
1077 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1078                 fwd_streams[sm_id]->core_cycles = 0;
1079 #endif
1080         }
1081         if (with_tx_first) {
1082                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1083                 if (port_fwd_begin != NULL) {
1084                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1085                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1086                 }
1087                 while (with_tx_first--) {
1088                         launch_packet_forwarding(
1089                                         run_one_txonly_burst_on_core);
1090                         rte_eal_mp_wait_lcore();
1091                 }
1092                 port_fwd_end = tx_only_engine.port_fwd_end;
1093                 if (port_fwd_end != NULL) {
1094                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1095                                 (*port_fwd_end)(fwd_ports_ids[i]);
1096                 }
1097         }
1098         launch_packet_forwarding(start_pkt_forward_on_core);
1099 }
1100
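/*
 * Stop packet forwarding: signal the forwarding lcores to stop and wait for
 * them, run the engine's per-port end callback, then aggregate and display
 * per-stream, per-port and accumulated statistics.
 */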
1101 void
1102 stop_packet_forwarding(void)
1103 {
1104         struct rte_eth_stats stats;
1105         struct rte_port *port;
1106         port_fwd_end_t  port_fwd_end;
1107         int i;
1108         portid_t   pt_id;
1109         streamid_t sm_id;
1110         lcoreid_t  lc_id;
1111         uint64_t total_recv;
1112         uint64_t total_xmit;
1113         uint64_t total_rx_dropped;
1114         uint64_t total_tx_dropped;
1115         uint64_t total_rx_nombuf;
1116         uint64_t tx_dropped;
1117         uint64_t rx_bad_ip_csum;
1118         uint64_t rx_bad_l4_csum;
1119 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1120         uint64_t fwd_cycles;
1121 #endif
1122         static const char *acc_stats_border = "+++++++++++++++";
1123
1124         if (test_done) {
1125                 printf("Packet forwarding not started\n");
1126                 return;
1127         }
1128         printf("Telling cores to stop...");
1129         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1130                 fwd_lcores[lc_id]->stopped = 1;
1131         printf("\nWaiting for lcores to finish...\n");
1132         rte_eal_mp_wait_lcore();
1133         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1134         if (port_fwd_end != NULL) {
1135                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1136                         pt_id = fwd_ports_ids[i];
1137                         (*port_fwd_end)(pt_id);
1138                 }
1139         }
1140 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1141         fwd_cycles = 0;
1142 #endif
1143         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1144                 if (cur_fwd_config.nb_fwd_streams >
1145                     cur_fwd_config.nb_fwd_ports) {
1146                         fwd_stream_stats_display(sm_id);
1147                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1148                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1149                 } else {
1150                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1151                                 fwd_streams[sm_id];
1152                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1153                                 fwd_streams[sm_id];
1154                 }
1155                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1156                 tx_dropped = (uint64_t) (tx_dropped +
1157                                          fwd_streams[sm_id]->fwd_dropped);
1158                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1159
1160                 rx_bad_ip_csum =
1161                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1162                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1163                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1164                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1165                                                         rx_bad_ip_csum;
1166
1167                 rx_bad_l4_csum =
1168                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1169                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1170                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1171                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1172                                                         rx_bad_l4_csum;
1173
1174 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1175                 fwd_cycles = (uint64_t) (fwd_cycles +
1176                                          fwd_streams[sm_id]->core_cycles);
1177 #endif
1178         }
1179         total_recv = 0;
1180         total_xmit = 0;
1181         total_rx_dropped = 0;
1182         total_tx_dropped = 0;
1183         total_rx_nombuf  = 0;
1184         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1185                 pt_id = fwd_ports_ids[i];
1186
1187                 port = &ports[pt_id];
1188                 rte_eth_stats_get(pt_id, &stats);
1189                 stats.ipackets -= port->stats.ipackets;
1190                 port->stats.ipackets = 0;
1191                 stats.opackets -= port->stats.opackets;
1192                 port->stats.opackets = 0;
1193                 stats.ibytes   -= port->stats.ibytes;
1194                 port->stats.ibytes = 0;
1195                 stats.obytes   -= port->stats.obytes;
1196                 port->stats.obytes = 0;
1197                 stats.imissed  -= port->stats.imissed;
1198                 port->stats.imissed = 0;
1199                 stats.oerrors  -= port->stats.oerrors;
1200                 port->stats.oerrors = 0;
1201                 stats.rx_nombuf -= port->stats.rx_nombuf;
1202                 port->stats.rx_nombuf = 0;
1203
1204                 total_recv += stats.ipackets;
1205                 total_xmit += stats.opackets;
1206                 total_rx_dropped += stats.imissed;
1207                 total_tx_dropped += port->tx_dropped;
1208                 total_rx_nombuf  += stats.rx_nombuf;
1209
1210                 fwd_port_stats_display(pt_id, &stats);
1211         }
1212         printf("\n  %s Accumulated forward statistics for all ports"
1213                "%s\n",
1214                acc_stats_border, acc_stats_border);
1215         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1216                "%-"PRIu64"\n"
1217                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1218                "%-"PRIu64"\n",
1219                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1220                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1221         if (total_rx_nombuf > 0)
1222                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1223         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1224                "%s\n",
1225                acc_stats_border, acc_stats_border);
1226 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1227         if (total_recv > 0)
1228                 printf("\n  CPU cycles/packet=%u (total cycles="
1229                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1230                        (unsigned int)(fwd_cycles / total_recv),
1231                        fwd_cycles, total_recv);
1232 #endif
1233         printf("\nDone.\n");
1234         test_done = 1;
1235 }
1236
1237 void
1238 dev_set_link_up(portid_t pid)
1239 {
1240         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1241                 printf("\nSet link up fail.\n");
1242 }
1243
1244 void
1245 dev_set_link_down(portid_t pid)
1246 {
1247         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1248                 printf("\nSet link down fail.\n");
1249 }
1250
1251 static int
1252 all_ports_started(void)
1253 {
1254         portid_t pi;
1255         struct rte_port *port;
1256
1257         FOREACH_PORT(pi, ports) {
1258                 port = &ports[pi];
1259                 /* Check if there is a port which is not started */
1260                 if ((port->port_status != RTE_PORT_STARTED) &&
1261                         (port->slave_flag == 0))
1262                         return 0;
1263         }
1264
1265         /* All ports are started */
1266         return 1;
1267 }
1268
1269 int
1270 all_ports_stopped(void)
1271 {
1272         portid_t pi;
1273         struct rte_port *port;
1274
1275         FOREACH_PORT(pi, ports) {
1276                 port = &ports[pi];
1277                 if ((port->port_status != RTE_PORT_STOPPED) &&
1278                         (port->slave_flag == 0))
1279                         return 0;
1280         }
1281
1282         return 1;
1283 }
1284
1285 int
1286 port_is_started(portid_t port_id)
1287 {
1288         if (port_id_is_invalid(port_id, ENABLED_WARN))
1289                 return 0;
1290
1291         if (ports[port_id].port_status != RTE_PORT_STARTED)
1292                 return 0;
1293
1294         return 1;
1295 }
1296
1297 static int
1298 port_is_closed(portid_t port_id)
1299 {
1300         if (port_id_is_invalid(port_id, ENABLED_WARN))
1301                 return 0;
1302
1303         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1304                 return 0;
1305
1306         return 1;
1307 }
1308
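/*
 * Start the given port, or all ports when pid is RTE_PORT_ALL: reconfigure
 * the device and its TX/RX queues if flagged, pick the mbuf pool matching
 * each RX queue's socket, start the device and optionally check the link
 * status. Return 0 on success, -1 if a port or queue could not be set up.
 */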
1309 int
1310 start_port(portid_t pid)
1311 {
1312         int diag, need_check_link_status = -1;
1313         portid_t pi;
1314         queueid_t qi;
1315         struct rte_port *port;
1316         struct ether_addr mac_addr;
1317
1318         if (port_id_is_invalid(pid, ENABLED_WARN))
1319                 return 0;
1320
1321         if(dcb_config)
1322                 dcb_test = 1;
1323         FOREACH_PORT(pi, ports) {
1324                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1325                         continue;
1326
1327                 need_check_link_status = 0;
1328                 port = &ports[pi];
1329                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1330                                                  RTE_PORT_HANDLING) == 0) {
1331                         printf("Port %d is now not stopped\n", pi);
1332                         continue;
1333                 }
1334
1335                 if (port->need_reconfig > 0) {
1336                         port->need_reconfig = 0;
1337
1338                         printf("Configuring Port %d (socket %u)\n", pi,
1339                                         port->socket_id);
1340                         /* configure port */
1341                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1342                                                 &(port->dev_conf));
1343                         if (diag != 0) {
1344                                 if (rte_atomic16_cmpset(&(port->port_status),
1345                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1346                                         printf("Port %d can not be set back "
1347                                                         "to stopped\n", pi);
1348                                 printf("Fail to configure port %d\n", pi);
1349                                 /* try to reconfigure port next time */
1350                                 port->need_reconfig = 1;
1351                                 return -1;
1352                         }
1353                 }
1354                 if (port->need_reconfig_queues > 0) {
1355                         port->need_reconfig_queues = 0;
1356                         /* setup tx queues */
1357                         for (qi = 0; qi < nb_txq; qi++) {
1358                                 if ((numa_support) &&
1359                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1360                                         diag = rte_eth_tx_queue_setup(pi, qi,
1361                                                 nb_txd,txring_numa[pi],
1362                                                 &(port->tx_conf));
1363                                 else
1364                                         diag = rte_eth_tx_queue_setup(pi, qi,
1365                                                 nb_txd,port->socket_id,
1366                                                 &(port->tx_conf));
1367
1368                                 if (diag == 0)
1369                                         continue;
1370
1371                                 /* Fail to setup tx queue, return */
1372                                 if (rte_atomic16_cmpset(&(port->port_status),
1373                                                         RTE_PORT_HANDLING,
1374                                                         RTE_PORT_STOPPED) == 0)
1375                                         printf("Port %d can not be set back "
1376                                                         "to stopped\n", pi);
1377                                 printf("Fail to configure port %d tx queues\n", pi);
1378                                 /* try to reconfigure queues next time */
1379                                 port->need_reconfig_queues = 1;
1380                                 return -1;
1381                         }
1382                         /* setup rx queues */
1383                         for (qi = 0; qi < nb_rxq; qi++) {
1384                                 if ((numa_support) &&
1385                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1386                                         struct rte_mempool * mp =
1387                                                 mbuf_pool_find(rxring_numa[pi]);
1388                                         if (mp == NULL) {
1389                                                 printf("Failed to setup RX queue: "
1390                                                         "No mempool allocation"
1391                                                         " on the socket %d\n",
1392                                                         rxring_numa[pi]);
1393                                                 return -1;
1394                                         }
1395
1396                                         diag = rte_eth_rx_queue_setup(pi, qi,
1397                                              nb_rxd,rxring_numa[pi],
1398                                              &(port->rx_conf),mp);
1399                                 } else {
1400                                         struct rte_mempool *mp =
1401                                                 mbuf_pool_find(port->socket_id);
1402                                         if (mp == NULL) {
1403                                                 printf("Failed to set up RX queue: "
1404                                                         "no mempool allocated "
1405                                                         "on socket %d\n",
1406                                                         port->socket_id);
1407                                                 return -1;
1408                                         }
1409                                         diag = rte_eth_rx_queue_setup(pi, qi,
1410                                              nb_rxd, port->socket_id,
1411                                              &(port->rx_conf), mp);
1412                                 }
1413                                 if (diag == 0)
1414                                         continue;
1415
1416                                 /* Failed to set up an RX queue; restore port status and return */
1417                                 if (rte_atomic16_cmpset(&(port->port_status),
1418                                                         RTE_PORT_HANDLING,
1419                                                         RTE_PORT_STOPPED) == 0)
1420                                         printf("Port %d cannot be set back "
1421                                                         "to stopped\n", pi);
1422                                 printf("Failed to configure port %d RX queues\n", pi);
1423                                 /* try to reconfigure queues next time */
1424                                 port->need_reconfig_queues = 1;
1425                                 return -1;
1426                         }
1427                 }
1428                 /* start port */
1429                 if (rte_eth_dev_start(pi) < 0) {
1430                         printf("Failed to start port %d\n", pi);
1431
1432                         /* Failed to start the port; set it back to stopped and skip it */
1433                         if (rte_atomic16_cmpset(&(port->port_status),
1434                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1435                                 printf("Port %d cannot be set back to "
1436                                                         "stopped\n", pi);
1437                         continue;
1438                 }
1439
1440                 if (rte_atomic16_cmpset(&(port->port_status),
1441                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1442                         printf("Port %d cannot be set to started\n", pi);
1443
1444                 rte_eth_macaddr_get(pi, &mac_addr);
1445                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1446                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1447                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1448                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1449
1450                 /* at least one port was started; the link status needs to be checked */
1451                 need_check_link_status = 1;
1452         }
1453
1454         if (need_check_link_status == 1 && !no_link_check)
1455                 check_all_ports_link_status(RTE_PORT_ALL);
1456         else if (need_check_link_status == 0)
1457                 printf("Please stop the ports first\n");
1458
1459         printf("Done\n");
1460         return 0;
1461 }
1462
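/*
 * Stop the given port, or every port when pid is RTE_PORT_ALL.  Ports that
 * are still part of the forwarding configuration or that act as bonding
 * slaves are skipped; the others are moved from STARTED to HANDLING,
 * stopped, and finally marked STOPPED.
 */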
1463 void
1464 stop_port(portid_t pid)
1465 {
1466         portid_t pi;
1467         struct rte_port *port;
1468         int need_check_link_status = 0;
1469
1470         if (dcb_test) {
1471                 dcb_test = 0;
1472                 dcb_config = 0;
1473         }
1474
1475         if (port_id_is_invalid(pid, ENABLED_WARN))
1476                 return;
1477
1478         printf("Stopping ports...\n");
1479
1480         FOREACH_PORT(pi, ports) {
1481                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1482                         continue;
1483
1484                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1485                         printf("Please remove port %d from forwarding configuration.\n", pi);
1486                         continue;
1487                 }
1488
1489                 if (port_is_bonding_slave(pi)) {
1490                         printf("Please remove port %d from bonded device.\n", pi);
1491                         continue;
1492                 }
1493
1494                 port = &ports[pi];
1495                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1496                                                 RTE_PORT_HANDLING) == 0)
1497                         continue;
1498
1499                 rte_eth_dev_stop(pi);
1500
1501                 if (rte_atomic16_cmpset(&(port->port_status),
1502                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1503                         printf("Port %d cannot be set to stopped\n", pi);
1504                 need_check_link_status = 1;
1505         }
1506         if (need_check_link_status && !no_link_check)
1507                 check_all_ports_link_status(RTE_PORT_ALL);
1508
1509         printf("Done\n");
1510 }
1511
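/*
 * Close the given port, or every port when pid is RTE_PORT_ALL.  A port can
 * only be closed once it is stopped and is no longer used for forwarding or
 * as a bonding slave; on success its status becomes RTE_PORT_CLOSED.
 */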
1512 void
1513 close_port(portid_t pid)
1514 {
1515         portid_t pi;
1516         struct rte_port *port;
1517
1518         if (port_id_is_invalid(pid, ENABLED_WARN))
1519                 return;
1520
1521         printf("Closing ports...\n");
1522
1523         FOREACH_PORT(pi, ports) {
1524                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1525                         continue;
1526
1527                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1528                         printf("Please remove port %d from forwarding configuration.\n", pi);
1529                         continue;
1530                 }
1531
1532                 if (port_is_bonding_slave(pi)) {
1533                         printf("Please remove port %d from bonded device.\n", pi);
1534                         continue;
1535                 }
1536
1537                 port = &ports[pi];
1538                 if (rte_atomic16_cmpset(&(port->port_status),
1539                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1540                         printf("Port %d is already closed\n", pi);
1541                         continue;
1542                 }
1543
1544                 if (rte_atomic16_cmpset(&(port->port_status),
1545                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1546                         printf("Port %d is not stopped\n", pi);
1547                         continue;
1548                 }
1549
1550                 rte_eth_dev_close(pi);
1551
1552                 if (rte_atomic16_cmpset(&(port->port_status),
1553                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1554                         printf("Port %d cannot be set to closed\n", pi);
1555         }
1556
1557         printf("Done\n");
1558 }
1559
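/*
 * Hot-plug a new port described by the given device identifier, enable it
 * and promiscuous mode, reconfigure it on its NUMA socket and leave it in
 * the STOPPED state.
 */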
1560 void
1561 attach_port(char *identifier)
1562 {
1563         portid_t pi = 0;
1564         unsigned int socket_id;
1565
1566         printf("Attaching a new port...\n");
1567
1568         if (identifier == NULL) {
1569                 printf("Invalid parameters are specified\n");
1570                 return;
1571         }
1572
1573         if (rte_eth_dev_attach(identifier, &pi))
1574                 return;
1575
1576         ports[pi].enabled = 1;
1577         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1578         /* if socket_id is invalid, set to 0 */
1579         if (check_socket_id(socket_id) < 0)
1580                 socket_id = 0;
1581         reconfig(pi, socket_id);
1582         rte_eth_promiscuous_enable(pi);
1583
1584         nb_ports = rte_eth_dev_count();
1585
1586         ports[pi].port_status = RTE_PORT_STOPPED;
1587
1588         printf("Port %d is attached. The total number of ports is now %d\n", pi, nb_ports);
1589         printf("Done\n");
1590 }
1591
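/*
 * Hot-unplug the given port.  The port must have been closed first; on
 * success it is disabled and the total port count is refreshed.
 */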
1592 void
1593 detach_port(uint8_t port_id)
1594 {
1595         char name[RTE_ETH_NAME_MAX_LEN];
1596
1597         printf("Detaching a port...\n");
1598
1599         if (!port_is_closed(port_id)) {
1600                 printf("Please close port first\n");
1601                 return;
1602         }
1603
1604         if (rte_eth_dev_detach(port_id, name))
1605                 return;
1606
1607         ports[port_id].enabled = 0;
1608         nb_ports = rte_eth_dev_count();
1609
1610         printf("Port '%s' is detached. The total number of ports is now %d\n",
1611                         name, nb_ports);
1612         printf("Done\n");
1613         return;
1614 }
1615
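/*
 * Tear-down path used on exit: stop packet forwarding if it is still
 * running, then stop and close every port.  Link-status checks are
 * disabled to avoid waiting for link-status polling during shutdown.
 */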
1616 void
1617 pmd_test_exit(void)
1618 {
1619         portid_t pt_id;
1620
1621         if (test_done == 0)
1622                 stop_packet_forwarding();
1623
1624         if (ports != NULL) {
1625                 no_link_check = 1;
1626                 FOREACH_PORT(pt_id, ports) {
1627                         printf("\nShutting down port %d...\n", pt_id);
1628                         fflush(stdout);
1629                         stop_port(pt_id);
1630                         close_port(pt_id);
1631                 }
1632         }
1633         printf("\nBye...\n");
1634 }
1635
1636 typedef void (*cmd_func_t)(void);
1637 struct pmd_test_command {
1638         const char *cmd_name;
1639         cmd_func_t cmd_func;
1640 };
1641
1642 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1643
1644 /* Check the link status of all ports for up to 9 seconds, then print the final result */
1645 static void
1646 check_all_ports_link_status(uint32_t port_mask)
1647 {
1648 #define CHECK_INTERVAL 100 /* 100ms */
1649 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1650         uint8_t portid, count, all_ports_up, print_flag = 0;
1651         struct rte_eth_link link;
1652
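        /*
         * Poll the selected ports every CHECK_INTERVAL ms.  Once every port
         * reports link up, or MAX_CHECK_TIME iterations have elapsed, do one
         * last pass that prints the per-port link status.
         */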
1653         printf("Checking link statuses...\n");
1654         fflush(stdout);
1655         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1656                 all_ports_up = 1;
1657                 FOREACH_PORT(portid, ports) {
1658                         if ((port_mask & (1 << portid)) == 0)
1659                                 continue;
1660                         memset(&link, 0, sizeof(link));
1661                         rte_eth_link_get_nowait(portid, &link);
1662                         /* print link status if flag set */
1663                         if (print_flag == 1) {
1664                                 if (link.link_status)
1665                                         printf("Port %d Link Up - speed %u "
1666                                                 "Mbps - %s\n", (uint8_t)portid,
1667                                                 (unsigned)link.link_speed,
1668                                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1669                                                 ("full-duplex") : ("half-duplex"));
1670                                 else
1671                                         printf("Port %d Link Down\n",
1672                                                 (uint8_t)portid);
1673                                 continue;
1674                         }
1675                         /* clear all_ports_up flag if any link down */
1676                         if (link.link_status == ETH_LINK_DOWN) {
1677                                 all_ports_up = 0;
1678                                 break;
1679                         }
1680                 }
1681                 /* after finally printing all link status, get out */
1682                 if (print_flag == 1)
1683                         break;
1684
1685                 if (all_ports_up == 0) {
1686                         fflush(stdout);
1687                         rte_delay_ms(CHECK_INTERVAL);
1688                 }
1689
1690                 /* set the print_flag if all ports up or timeout */
1691                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1692                         print_flag = 1;
1693                 }
1694         }
1695 }
1696
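/*
 * Program the user-supplied TX queue to statistics-counter mappings for the
 * given port.  Returns the PMD error code on failure; the per-port flag is
 * only set when at least one mapping was actually programmed.
 */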
1697 static int
1698 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1699 {
1700         uint16_t i;
1701         int diag;
1702         uint8_t mapping_found = 0;
1703
1704         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1705                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1706                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1707                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1708                                         tx_queue_stats_mappings[i].queue_id,
1709                                         tx_queue_stats_mappings[i].stats_counter_id);
1710                         if (diag != 0)
1711                                 return diag;
1712                         mapping_found = 1;
1713                 }
1714         }
1715         if (mapping_found)
1716                 port->tx_queue_stats_mapping_enabled = 1;
1717         return 0;
1718 }
1719
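/* RX counterpart of the TX queue statistics mapping helper above. */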
1720 static int
1721 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1722 {
1723         uint16_t i;
1724         int diag;
1725         uint8_t mapping_found = 0;
1726
1727         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1728                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1729                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1730                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1731                                         rx_queue_stats_mappings[i].queue_id,
1732                                         rx_queue_stats_mappings[i].stats_counter_id);
1733                         if (diag != 0)
1734                                 return diag;
1735                         mapping_found = 1;
1736                 }
1737         }
1738         if (mapping_found)
1739                 port->rx_queue_stats_mapping_enabled = 1;
1740         return 0;
1741 }
1742
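/*
 * Apply both TX and RX queue statistics mappings to a port.  -ENOTSUP from
 * the PMD simply disables the feature for that direction; any other error
 * is fatal.
 */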
1743 static void
1744 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1745 {
1746         int diag = 0;
1747
1748         diag = set_tx_queue_stats_mapping_registers(pi, port);
1749         if (diag != 0) {
1750                 if (diag == -ENOTSUP) {
1751                         port->tx_queue_stats_mapping_enabled = 0;
1752                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1753                 }
1754                 else
1755                         rte_exit(EXIT_FAILURE,
1756                                         "set_tx_queue_stats_mapping_registers "
1757                                         "failed for port id=%d diag=%d\n",
1758                                         pi, diag);
1759         }
1760
1761         diag = set_rx_queue_stats_mapping_registers(pi, port);
1762         if (diag != 0) {
1763                 if (diag == -ENOTSUP) {
1764                         port->rx_queue_stats_mapping_enabled = 0;
1765                         printf("RX queue stats mapping not supported port id=%d\n", pi);
1766                 }
1767                 else
1768                         rte_exit(EXIT_FAILURE,
1769                                         "set_rx_queue_stats_mapping_registers "
1770                                         "failed for port id=%d diag=%d\n",
1771                                         pi, diag);
1772         }
1773 }
1774
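/*
 * Start from the PMD's default RX/TX queue configuration and override only
 * the thresholds and flags that were explicitly set on the command line
 * (RTE_PMD_PARAM_UNSET marks untouched parameters).
 */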
1775 static void
1776 rxtx_port_config(struct rte_port *port)
1777 {
1778         port->rx_conf = port->dev_info.default_rxconf;
1779         port->tx_conf = port->dev_info.default_txconf;
1780
1781         /* Check if any RX/TX parameters have been passed */
1782         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1783                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1784
1785         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1786                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1787
1788         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1789                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1790
1791         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1792                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1793
1794         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1795                 port->rx_conf.rx_drop_en = rx_drop_en;
1796
1797         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1798                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1799
1800         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1801                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1802
1803         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1804                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1805
1806         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1807                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1808
1809         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1810                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1811
1812         if (txq_flags != RTE_PMD_PARAM_UNSET)
1813                 port->tx_conf.txq_flags = txq_flags;
1814 }
1815
1816 void
1817 init_port_config(void)
1818 {
1819         portid_t pid;
1820         struct rte_port *port;
1821
1822         FOREACH_PORT(pid, ports) {
1823                 port = &ports[pid];
1824                 port->dev_conf.rxmode = rx_mode;
1825                 port->dev_conf.fdir_conf = fdir_conf;
1826                 if (nb_rxq > 1) {
1827                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1828                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1829                 } else {
1830                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1831                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1832                 }
1833
1834                 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1835                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1836                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1837                         else
1838                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1839                 }
1840
1841                 if (port->dev_info.max_vfs != 0) {
1842                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1843                                 port->dev_conf.rxmode.mq_mode =
1844                                         ETH_MQ_RX_VMDQ_RSS;
1845                         else
1846                                 port->dev_conf.rxmode.mq_mode =
1847                                         ETH_MQ_RX_NONE;
1848
1849                         port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1850                 }
1851
1852                 rxtx_port_config(port);
1853
1854                 rte_eth_macaddr_get(pid, &port->eth_addr);
1855
1856                 map_port_queue_stats_mapping_registers(pid, port);
1857 #ifdef RTE_NIC_BYPASS
1858                 rte_eth_dev_bypass_init(pid);
1859 #endif
1860         }
1861 }
1862
1863 void set_port_slave_flag(portid_t slave_pid)
1864 {
1865         struct rte_port *port;
1866
1867         port = &ports[slave_pid];
1868         port->slave_flag = 1;
1869 }
1870
1871 void clear_port_slave_flag(portid_t slave_pid)
1872 {
1873         struct rte_port *port;
1874
1875         port = &ports[slave_pid];
1876         port->slave_flag = 0;
1877 }
1878
1879 uint8_t port_is_bonding_slave(portid_t slave_pid)
1880 {
1881         struct rte_port *port;
1882
1883         port = &ports[slave_pid];
1884         return port->slave_flag;
1885 }
1886
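/* VLAN tags used to build the VMDQ+DCB pool map and the port VLAN filter */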
1887 const uint16_t vlan_tags[] = {
1888                 0,  1,  2,  3,  4,  5,  6,  7,
1889                 8,  9, 10, 11,  12, 13, 14, 15,
1890                 16, 17, 18, 19, 20, 21, 22, 23,
1891                 24, 25, 26, 27, 28, 29, 30, 31
1892 };
1893
1894 static int
1895 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1896                  enum dcb_mode_enable dcb_mode,
1897                  enum rte_eth_nb_tcs num_tcs,
1898                  uint8_t pfc_en)
1899 {
1900         uint8_t i;
1901
1902         /*
1903          * Builds up the correct configuration for dcb+vt based on the vlan tags array
1904          * given above, and the number of traffic classes available for use.
1905          */
1906         if (dcb_mode == DCB_VT_ENABLED) {
1907                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1908                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
1909                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1910                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1911
1912                 /* VMDQ+DCB RX and TX configurations */
1913                 vmdq_rx_conf->enable_default_pool = 0;
1914                 vmdq_rx_conf->default_pool = 0;
1915                 vmdq_rx_conf->nb_queue_pools =
1916                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1917                 vmdq_tx_conf->nb_queue_pools =
1918                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1919
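                /* map each VLAN tag to exactly one pool, round-robin */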
1920                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1921                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1922                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1923                         vmdq_rx_conf->pool_map[i].pools =
1924                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
1925                 }
1926                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1927                         vmdq_rx_conf->dcb_tc[i] = i;
1928                         vmdq_tx_conf->dcb_tc[i] = i;
1929                 }
1930
1931                 /* set DCB mode of RX and TX of multiple queues */
1932                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1933                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1934         } else {
1935                 struct rte_eth_dcb_rx_conf *rx_conf =
1936                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
1937                 struct rte_eth_dcb_tx_conf *tx_conf =
1938                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
1939
1940                 rx_conf->nb_tcs = num_tcs;
1941                 tx_conf->nb_tcs = num_tcs;
1942
1943                 for (i = 0; i < num_tcs; i++) {
1944                         rx_conf->dcb_tc[i] = i;
1945                         tx_conf->dcb_tc[i] = i;
1946                 }
1947                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1948                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1949                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1950         }
1951
1952         if (pfc_en)
1953                 eth_conf->dcb_capability_en =
1954                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1955         else
1956                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1957
1958         return 0;
1959 }
1960
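/*
 * Build a DCB configuration (with or without virtualization) for the given
 * port: the resulting rte_eth_conf is copied into the port, hardware VLAN
 * filtering is enabled and every entry of vlan_tags[] is added to the
 * port's VLAN filter.
 */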
1961 int
1962 init_port_dcb_config(portid_t pid,
1963                      enum dcb_mode_enable dcb_mode,
1964                      enum rte_eth_nb_tcs num_tcs,
1965                      uint8_t pfc_en)
1966 {
1967         struct rte_eth_conf port_conf;
1968         struct rte_eth_dev_info dev_info;
1969         struct rte_port *rte_port;
1970         int retval;
1971         uint16_t i;
1972
1973         rte_eth_dev_info_get(pid, &dev_info);
1974
1975         /* If dev_info.vmdq_pool_base is greater than 0,
1976          * the queue IDs of the VMDQ pools start after the PF queues.
1977          */
1978         if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
1979                 printf("VMDQ_DCB multi-queue mode is nonsensical"
1980                         " for port %d.\n", pid);
1981                 return -1;
1982         }
1983
1984         /* Assume the ports in testpmd have the same dcb capability
1985          * and the same number of RX and TX queues in DCB mode
1986          */
1987         if (dcb_mode == DCB_VT_ENABLED) {
1988                 nb_rxq = dev_info.max_rx_queues;
1989                 nb_txq = dev_info.max_tx_queues;
1990         } else {
1991                 /* if VT is disabled, use all PF queues */
1992                 if (dev_info.vmdq_pool_base == 0) {
1993                         nb_rxq = dev_info.max_rx_queues;
1994                         nb_txq = dev_info.max_tx_queues;
1995                 } else {
1996                         nb_rxq = (queueid_t)num_tcs;
1997                         nb_txq = (queueid_t)num_tcs;
1998
1999                 }
2000         }
2001         rx_free_thresh = 64;
2002
2003         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2004         /* Enter DCB configuration status */
2005         dcb_config = 1;
2006
2007         /* set configuration of DCB in VT mode and DCB in non-VT mode */
2008         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2009         if (retval < 0)
2010                 return retval;
2011
2012         rte_port = &ports[pid];
2013         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2014
2015         rxtx_port_config(rte_port);
2016         /* VLAN filter */
2017         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2018         for (i = 0; i < RTE_DIM(vlan_tags); i++)
2019                 rx_vft_set(pid, vlan_tags[i], 1);
2020
2021         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2022         map_port_queue_stats_mapping_registers(pid, rte_port);
2023
2024         rte_port->dcb_flag = 1;
2025
2026         return 0;
2027 }
2028
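/*
 * Allocate the ports[] array once for RTE_MAX_ETHPORTS entries and mark the
 * ports probed at startup as enabled.
 */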
2029 static void
2030 init_port(void)
2031 {
2032         portid_t pid;
2033
2034         /* Configuration of Ethernet ports. */
2035         ports = rte_zmalloc("testpmd: ports",
2036                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2037                             RTE_CACHE_LINE_SIZE);
2038         if (ports == NULL) {
2039                 rte_exit(EXIT_FAILURE,
2040                                 "rte_zmalloc(%d struct rte_port) failed\n",
2041                                 RTE_MAX_ETHPORTS);
2042         }
2043
2044         /* enable allocated ports */
2045         for (pid = 0; pid < nb_ports; pid++)
2046                 ports[pid].enabled = 1;
2047 }
2048
2049 static void
2050 force_quit(void)
2051 {
2052         pmd_test_exit();
2053         prompt_exit();
2054 }
2055
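/*
 * On SIGINT/SIGTERM: shut down the packet capture framework and all ports,
 * then re-raise the signal with the default handler so that the process
 * terminates with the conventional exit status for that signal.
 */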
2056 static void
2057 signal_handler(int signum)
2058 {
2059         if (signum == SIGINT || signum == SIGTERM) {
2060                 printf("\nSignal %d received, preparing to exit...\n",
2061                                 signum);
2062 #ifdef RTE_LIBRTE_PDUMP
2063                 /* uninitialize packet capture framework */
2064                 rte_pdump_uninit();
2065 #endif
2066                 force_quit();
2067                 /* exit with the expected status */
2068                 signal(signum, SIG_DFL);
2069                 kill(getpid(), signum);
2070         }
2071 }
2072
2073 int
2074 main(int argc, char** argv)
2075 {
2076         int  diag;
2077         uint8_t port_id;
2078
2079         signal(SIGINT, signal_handler);
2080         signal(SIGTERM, signal_handler);
2081
2082         diag = rte_eal_init(argc, argv);
2083         if (diag < 0)
2084                 rte_panic("Cannot init EAL\n");
2085
2086 #ifdef RTE_LIBRTE_PDUMP
2087         /* initialize packet capture framework */
2088         rte_pdump_init(NULL);
2089 #endif
2090
2091         nb_ports = (portid_t) rte_eth_dev_count();
2092         if (nb_ports == 0)
2093                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2094
2095         /* allocate port structures, and init them */
2096         init_port();
2097
2098         set_def_fwd_config();
2099         if (nb_lcores == 0)
2100                 rte_panic("Empty set of forwarding logical cores - check the "
2101                           "core mask supplied in the command parameters\n");
2102
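        /* skip the arguments consumed by the EAL and parse the remaining
         * testpmd options */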
2103         argc -= diag;
2104         argv += diag;
2105         if (argc > 1)
2106                 launch_args_parse(argc, argv);
2107
2108         if (!nb_rxq && !nb_txq)
2109                 printf("Warning: Either the number of RX or TX queues should be non-zero\n");
2110
2111         if (nb_rxq > 1 && nb_rxq > nb_txq)
2112                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2113                        "but nb_txq=%d will prevent it from being fully tested.\n",
2114                        nb_rxq, nb_txq);
2115
2116         init_config();
2117         if (start_port(RTE_PORT_ALL) != 0)
2118                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2119
2120         /* set all ports to promiscuous mode by default */
2121         FOREACH_PORT(port_id, ports)
2122                 rte_eth_promiscuous_enable(port_id);
2123
2124 #ifdef RTE_LIBRTE_CMDLINE
2125         if (interactive == 1) {
2126                 if (auto_start) {
2127                         printf("Start automatic packet forwarding\n");
2128                         start_packet_forwarding(0);
2129                 }
2130                 prompt();
2131         } else
2132 #endif
2133         {
2134                 char c;
2135                 int rc;
2136
2137                 printf("No commandline core given, start packet forwarding\n");
2138                 start_packet_forwarding(0);
2139                 printf("Press enter to exit\n");
2140                 rc = read(0, &c, 1);
2141                 pmd_test_exit();
2142                 if (rc < 0)
2143                         return 1;
2144         }
2145
2146         return 0;
2147 }