/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <errno.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_string_fns.h>

/* Local headers of this example: the crypto_*() QAT wrappers and MAIN(). */
#include "crypto.h"
#include "main.h"
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define NB_MBUF   (32 * 1024)
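/*
 * Each mbuf holds 2 KB of packet data plus the rte_mbuf metadata and the
 * headroom reserved in front of the data (RTE_PKTMBUF_HEADROOM), so the
 * element size above covers a full standard Ethernet frame without
 * segmentation.
 */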
/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /**< Default value of the RX prefetch threshold register. */
#define RX_HTHRESH 8 /**< Default value of the RX host threshold register. */
#define RX_WTHRESH 4 /**< Default value of the RX write-back threshold register. */
/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /**< Default value of the TX prefetch threshold register. */
#define TX_HTHRESH 0  /**< Default value of the TX host threshold register. */
#define TX_WTHRESH 0  /**< Default value of the TX write-back threshold register. */
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

#define TX_QUEUE_FLUSH_MASK 0xFFFFFFFF
#define TSC_COUNT_LIMIT 1000
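/*
 * Rather than reading the TSC on every loop iteration, each lcore counts
 * iterations and only samples the TSC after TSC_COUNT_LIMIT passes; the TX
 * queues are drained once the sampled interval exceeds BURST_TX_DRAIN_US
 * (see main_loop()).
 */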
#define ACTION_ENCRYPT 1
#define ACTION_DECRYPT 2
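/*
 * The required operation is encoded in the IPv4 source address of each
 * packet arriving from the NIC: one octet carries the ACTION_ENCRYPT /
 * ACTION_DECRYPT flags, and two further octets select the cipher and hash
 * algorithms (see the decoding in main_loop()).
 */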
/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/* ethernet addresses of ports */
static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

/* mask of enabled ports */
static unsigned enabled_port_mask = 0;
static int promiscuous_on = 1; /**< Ports are set in promiscuous mode by default. */
struct mbuf_table {
	uint16_t len;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};
struct lcore_rx_queue {
	uint8_t port_id;
	uint8_t queue_id;
} __rte_cache_aligned;
#define MAX_RX_QUEUE_PER_LCORE 16

#define MAX_LCORE_PARAMS 1024
struct lcore_params {
	uint8_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
};
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
/*
 * Default (port, queue, lcore) mapping used when --config is not given.
 * These entries are example values (they assume ports 0-3 and lcores 2-3
 * are available); override them with --config for the target platform.
 */
static struct lcore_params lcore_params_array_default[] = {
	{0, 0, 2},
	{0, 1, 2},
	{0, 2, 2},
	{1, 0, 2},
	{1, 1, 2},
	{1, 2, 2},
	{2, 0, 2},
	{3, 0, 3},
	{3, 1, 3},
};
static struct lcore_params *lcore_params = lcore_params_array_default;
static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
				sizeof(lcore_params_array_default[0]);
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_RSS,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};
static const struct rte_eth_rxconf rx_conf = {
	.rx_thresh = {
		.pthresh = RX_PTHRESH,
		.hthresh = RX_HTHRESH,
		.wthresh = RX_WTHRESH,
	},
};
static const struct rte_eth_txconf tx_conf = {
	.tx_thresh = {
		.pthresh = TX_PTHRESH,
		.hthresh = TX_HTHRESH,
		.wthresh = TX_WTHRESH,
	},
	.tx_free_thresh = 0, /* Use PMD default values */
	.tx_rs_thresh = 0,   /* Use PMD default values */
};
static struct rte_mempool *pktmbuf_pool[RTE_MAX_NUMA_NODES];
struct lcore_conf {
	uint64_t tsc;
	uint64_t tsc_count;
	uint32_t tx_mask;
	uint16_t n_rx_queue;
	uint16_t rx_queue_list_pos;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct mbuf_table rx_mbuf;
	uint32_t rx_mbuf_pos;
	uint32_t rx_curr_queue;
	struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;

static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
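/*
 * Return one packet at a time to the caller. When the current burst is
 * exhausted, poll the lcore's RX queues round-robin until a queue yields
 * a fresh burst, or return NULL if all queues are empty.
 */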
static inline struct rte_mbuf *
nic_rx_get_packet(struct lcore_conf *qconf)
{
	struct rte_mbuf *pkt;
	uint32_t i;

	if (unlikely(qconf->n_rx_queue == 0))
		return NULL;

	/* Look for the next queue with packets; return if none */
	if (unlikely(qconf->rx_mbuf_pos == qconf->rx_mbuf.len)) {
		qconf->rx_mbuf_pos = 0;
		for (i = 0; i < qconf->n_rx_queue; i++) {
			qconf->rx_mbuf.len = rte_eth_rx_burst(
				qconf->rx_queue_list[qconf->rx_curr_queue].port_id,
				qconf->rx_queue_list[qconf->rx_curr_queue].queue_id,
				qconf->rx_mbuf.m_table, MAX_PKT_BURST);

			qconf->rx_curr_queue++;
			if (unlikely(qconf->rx_curr_queue == qconf->n_rx_queue))
				qconf->rx_curr_queue = 0;
			if (likely(qconf->rx_mbuf.len > 0))
				break;
		}
		if (unlikely(i == qconf->n_rx_queue))
			return NULL;
	}

	/* Get the next packet from the current queue; if last packet, go to next queue */
	pkt = qconf->rx_mbuf.m_table[qconf->rx_mbuf_pos];
	qconf->rx_mbuf_pos++;
	return pkt;
}
static void
nic_tx_flush_queues(struct lcore_conf *qconf)
{
	uint8_t portid;

	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		struct rte_mbuf **m_table = NULL;
		uint16_t queueid, len;
		uint32_t n, i;

		if (likely((qconf->tx_mask & (1 << portid)) == 0))
			continue;

		len = qconf->tx_mbufs[portid].len;
		if (likely(len == 0))
			continue;

		queueid = qconf->tx_queue_id[portid];
		m_table = qconf->tx_mbufs[portid].m_table;

		/* Free any mbufs the PMD did not accept for transmission. */
		n = rte_eth_tx_burst(portid, queueid, m_table, len);
		for (i = n; i < len; i++)
			rte_pktmbuf_free(m_table[i]);

		qconf->tx_mbufs[portid].len = 0;
	}

	/* Re-arm every port for the next drain cycle. */
	qconf->tx_mask = TX_QUEUE_FLUSH_MASK;
}
static void
nic_tx_send_packet(struct rte_mbuf *pkt, uint8_t port)
{
	struct lcore_conf *qconf;
	uint32_t lcoreid;
	uint16_t len;

	if (unlikely(pkt == NULL)) {
		return;
	}

	lcoreid = rte_lcore_id();
	qconf = &lcore_conf[lcoreid];

	len = qconf->tx_mbufs[port].len;
	qconf->tx_mbufs[port].m_table[len] = pkt;
	len++;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		uint32_t n, i;
		uint16_t queueid;

		queueid = qconf->tx_queue_id[port];
		n = rte_eth_tx_burst(port, queueid, qconf->tx_mbufs[port].m_table, MAX_PKT_BURST);
		for (i = n; i < MAX_PKT_BURST; i++)
			rte_pktmbuf_free(qconf->tx_mbufs[port].m_table[i]);

		qconf->tx_mask &= ~(1 << port);
		len = 0;
	}

	qconf->tx_mbufs[port].len = len;
}
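/*
 * Ports are paired: traffic received on an even port is transmitted on the
 * following odd port and vice versa, hence the XOR with 1 below (and the
 * build-time check that RTE_MAX_ETHPORTS is even).
 */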
static inline uint8_t
get_output_port(uint8_t input_port)
{
	RTE_BUILD_BUG_ON((RTE_MAX_ETHPORTS & 1) != 0);
	return (uint8_t)(input_port ^ 1);
}
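/*
 * Per-lcore dataflow: completed QAT responses are drained ahead of new NIC
 * RX packets; packets fresh from the NIC are classified by their IPv4
 * source address and either handed to QAT for encryption/decryption or
 * forwarded as-is, while packets returning from QAT go straight to TX.
 */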
/* main processing loop */
static __attribute__((noreturn)) int
main_loop(__attribute__((unused)) void *dummy)
{
	uint32_t lcoreid;
	struct lcore_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

	lcoreid = rte_lcore_id();
	qconf = &lcore_conf[lcoreid];

	printf("Thread %u starting...\n", lcoreid);

	for (;;) {
		struct rte_mbuf *pkt;
		uint32_t pkt_from_nic_rx = 0;
		uint8_t port;

		/* Flush TX queues */
		qconf->tsc_count++;
		if (unlikely(qconf->tsc_count == TSC_COUNT_LIMIT)) {
			uint64_t tsc, diff_tsc;

			tsc = rte_rdtsc();
			diff_tsc = tsc - qconf->tsc;
			if (unlikely(diff_tsc > drain_tsc)) {
				nic_tx_flush_queues(qconf);
				crypto_flush_tx_queue(lcoreid);
				qconf->tsc = tsc;
			}
			qconf->tsc_count = 0;
		}

		/*
		 * Check the Intel QuickAssist queues first; if no crypto
		 * response is pending, poll the NIC RX queues.
		 */
		pkt = (struct rte_mbuf *) crypto_get_next_response();
		if (pkt == NULL) {
			pkt = nic_rx_get_packet(qconf);
			pkt_from_nic_rx = 1;
		}
		if (pkt == NULL)
			continue;

		/* Send packet to either QAT encrypt, QAT decrypt or NIC TX */
		if (pkt_from_nic_rx) {
			struct ipv4_hdr *ip = (struct ipv4_hdr *) (rte_pktmbuf_mtod(pkt, unsigned char *) +
					sizeof(struct ether_hdr));

			if (ip->src_addr & rte_cpu_to_be_32(ACTION_ENCRYPT)) {
				if (CRYPTO_RESULT_FAIL == crypto_encrypt(pkt,
					(enum cipher_alg)((ip->src_addr >> 16) & 0xFF),
					(enum hash_alg)((ip->src_addr >> 8) & 0xFF)))
					rte_pktmbuf_free(pkt);
				continue;
			}

			if (ip->src_addr & rte_cpu_to_be_32(ACTION_DECRYPT)) {
				if (CRYPTO_RESULT_FAIL == crypto_decrypt(pkt,
					(enum cipher_alg)((ip->src_addr >> 16) & 0xFF),
					(enum hash_alg)((ip->src_addr >> 8) & 0xFF)))
					rte_pktmbuf_free(pkt);
				continue;
			}
		}

		port = get_output_port(pkt->pkt.in_port);

		/* Transmit the packet */
		nic_tx_send_packet(pkt, port);
	}
}
static inline unsigned
get_port_max_rx_queues(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	return dev_info.max_rx_queues;
}

static inline unsigned
get_port_max_tx_queues(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	return dev_info.max_tx_queues;
}
static int
check_lcore_params(void)
{
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].queue_id >= get_port_max_rx_queues(lcore_params[i].port_id)) {
			printf("invalid queue number: %hhu\n", lcore_params[i].queue_id);
			return -1;
		}
		if (!rte_lcore_is_enabled(lcore_params[i].lcore_id)) {
			printf("error: lcore %hhu is not enabled in lcore mask\n",
				lcore_params[i].lcore_id);
			return -1;
		}
	}
	return 0;
}
static int
check_port_config(const unsigned nb_ports)
{
	unsigned portid;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("port %u is not enabled in port mask\n", portid);
			return -1;
		}
		if (portid >= nb_ports) {
			printf("port %u is not present on the board\n", portid);
			return -1;
		}
	}
	return 0;
}
static uint8_t
get_port_n_rx_queues(const uint8_t port)
{
	int queue = -1;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
			queue = lcore_params[i].queue_id;
	}
	return (uint8_t)(++queue);
}
static int
init_lcore_rx_queues(void)
{
	uint16_t i, nb_rx_queue;
	uint8_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
				(unsigned)nb_rx_queue + 1, (unsigned)lcore);
			return -1;
		}
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
			lcore_params[i].port_id;
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
			lcore_params[i].queue_id;
		lcore_conf[lcore].n_rx_queue++;
	}
	return 0;
}
/* display usage */
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [--no-promisc]"
		" [--config '(port,queue,lcore)[,(port,queue,lcore)]']\n"
		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		"  --no-promisc: disable promiscuous mode (default is ON)\n"
		"  --config '(port,queue,lcore)': RX queue configuration\n",
		prgname);
}
static unsigned
parse_portmask(const char *portmask)
{
	unsigned long pm;
	char *end = NULL;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}
static int
parse_config(const char *q_arg)
{
	char s[256];
	const char *p, *p_end = q_arg;
	char *end;
	enum fieldnames { FLD_PORT = 0, FLD_QUEUE, FLD_LCORE, _NUM_FLD };
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	int i;
	unsigned size;

	nb_lcore_params = 0;

	while ((p = strchr(p_end, '(')) != NULL) {
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
				nb_lcore_params);
			return -1;
		}
		if ((p_end = strchr(++p, ')')) == NULL)
			return -1;
		if ((size = p_end - p) >= sizeof(s))
			return -1;

		rte_snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
				return -1;
		}
		lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;
	return 0;
}
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret, option_index;
	char **argvopt = argv;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{"config", 1, 0, 0},
		{"no-promisc", 0, 0, 0},
		{NULL, 0, 0, 0}
	};

	while ((opt = getopt_long(argc, argvopt, "p:",
				lgopts, &option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		/* long options */
		case 0:
			if (strcmp(lgopts[option_index].name, "config") == 0) {
				ret = parse_config(optarg);
				if (ret) {
					printf("invalid config\n");
					print_usage(prgname);
					return -1;
				}
			}
			if (strcmp(lgopts[option_index].name, "no-promisc") == 0) {
				printf("Promiscuous mode disabled\n");
				promiscuous_on = 0;
			}
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (enabled_port_mask == 0) {
		printf("portmask not specified\n");
		print_usage(prgname);
		return -1;
	}

	if (optind >= 0)
		argv[optind-1] = prgname;
	ret = optind - 1;
	optind = 0; /* reset getopt lib */
	return ret;
}
/* Print an Ethernet address. */
static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
	printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
		eth_addr->addr_bytes[0],
		eth_addr->addr_bytes[1],
		eth_addr->addr_bytes[2],
		eth_addr->addr_bytes[3],
		eth_addr->addr_bytes[4],
		eth_addr->addr_bytes[5]);
}
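/*
 * Create one mbuf pool per NUMA socket that hosts an enabled lcore, so that
 * RX descriptors are always populated from socket-local memory.
 */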
static int
init_mem(void)
{
	const unsigned flags = 0;
	int socketid;
	unsigned lcoreid;
	char s[64];

	RTE_LCORE_FOREACH(lcoreid) {
		socketid = rte_lcore_to_socket_id(lcoreid);
		if (socketid >= RTE_MAX_NUMA_NODES) {
			printf("Socket %d of lcore %u is out of range %d\n",
				socketid, lcoreid, RTE_MAX_NUMA_NODES);
			return -1;
		}
		if (pktmbuf_pool[socketid] == NULL) {
			rte_snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
			pktmbuf_pool[socketid] =
				rte_mempool_create(s, NB_MBUF, MBUF_SIZE, 32,
					sizeof(struct rte_pktmbuf_pool_private),
					rte_pktmbuf_pool_init, NULL,
					rte_pktmbuf_init, NULL,
					socketid, flags);
			if (pktmbuf_pool[socketid] == NULL) {
				printf("Cannot init mbuf pool on socket %d\n", socketid);
				return -1;
			}
			printf("Allocated mbuf pool on socket %d\n", socketid);
		}
	}
	return 0;
}
int
MAIN(int argc, char **argv)
{
	struct lcore_conf *qconf;
	struct rte_eth_link link;
	int ret;
	unsigned nb_ports;
	uint16_t queueid;
	unsigned lcoreid;
	uint32_t nb_tx_queue;
	uint8_t portid, nb_rx_queue, queue, socketid;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return -1;
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		return -1;

	/* init PMD(s) */
#ifdef RTE_LIBRTE_IGB_PMD
	if (rte_igb_pmd_init() < 0)
		rte_panic("Cannot init igb pmd\n");
#endif
#ifdef RTE_LIBRTE_IXGBE_PMD
	if (rte_ixgbe_pmd_init() < 0)
		rte_panic("Cannot init ixgbe pmd\n");
#endif

	if (rte_eal_pci_probe() < 0)
		rte_panic("Cannot probe PCI\n");

	if (check_lcore_params() < 0)
		rte_panic("check_lcore_params failed\n");

	ret = init_lcore_rx_queues();
	if (ret < 0)
		return -1;

	ret = init_mem();
	if (ret < 0)
		return -1;

	nb_ports = rte_eth_dev_count();
	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	if (check_port_config(nb_ports) < 0)
		rte_panic("check_port_config failed\n");

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}

		/* init port */
		printf("Initializing port %d ... ", portid);
		fflush(stdout);

		nb_rx_queue = get_port_n_rx_queues(portid);
		if (nb_rx_queue > get_port_max_rx_queues(portid))
			rte_panic("Number of rx queues %d exceeds max number of rx queues %u"
				" for port %d\n", nb_rx_queue, get_port_max_rx_queues(portid),
				portid);
		nb_tx_queue = rte_lcore_count();
		if (nb_tx_queue > get_port_max_tx_queues(portid))
			rte_panic("Number of lcores %u exceeds max number of tx queues %u"
				" for port %d\n", nb_tx_queue, get_port_max_tx_queues(portid),
				portid);
		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
			nb_rx_queue, (unsigned)nb_tx_queue);
		ret = rte_eth_dev_configure(portid, nb_rx_queue,
					(uint16_t)nb_tx_queue, &port_conf);
		if (ret < 0)
			rte_panic("Cannot configure device: err=%d, port=%d\n",
				ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf(", ");

		/* init one TX queue per couple (lcore,port) */
		queueid = 0;
		RTE_LCORE_FOREACH(lcoreid) {
			socketid = (uint8_t)rte_lcore_to_socket_id(lcoreid);
			printf("txq=%u,%d,%d ", lcoreid, queueid, socketid);
			fflush(stdout);
			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
						socketid, &tx_conf);
			if (ret < 0)
				rte_panic("rte_eth_tx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);

			qconf = &lcore_conf[lcoreid];
			qconf->tx_queue_id[portid] = queueid;
			queueid++;
		}
		printf("\n");
	}

	/* init one RX queue per (lcore,queue) pair from the configuration */
	RTE_LCORE_FOREACH(lcoreid) {
		qconf = &lcore_conf[lcoreid];
		printf("\nInitializing rx queues on lcore %u ... ", lcoreid);
		fflush(stdout);

		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
			portid = qconf->rx_queue_list[queue].port_id;
			queueid = qconf->rx_queue_list[queue].queue_id;
			socketid = (uint8_t)rte_lcore_to_socket_id(lcoreid);
			printf("rxq=%d,%d,%d ", portid, queueid, socketid);
			fflush(stdout);

			ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
					socketid, &rx_conf, pktmbuf_pool[socketid]);
			if (ret < 0)
				rte_panic("rte_eth_rx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);
		}
	}
	printf("\n");

	/* start ports */
	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_panic("rte_eth_dev_start: err=%d, port=%d\n",
				ret, portid);

		printf("done: Port %d ", portid);

		/* get link status */
		rte_eth_link_get(portid, &link);
		if (link.link_status)
			printf(" Link Up - speed %u Mbps - %s\n",
				(unsigned) link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
				("full-duplex") : ("half-duplex"));
		else
			printf(" Link Down\n");
		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
		 * target machine.
		 */
		if (promiscuous_on)
			rte_eth_promiscuous_enable(portid);
	}

	printf("Crypto: Initializing Crypto...\n");
	if (crypto_init() != 0)
		return -1;

	RTE_LCORE_FOREACH(lcoreid) {
		if (per_core_crypto_init(lcoreid) != 0) {
			printf("Crypto: Cannot init lcore crypto on lcore %u\n",
				(unsigned)lcoreid);
			return -1;
		}
	}
	printf("Crypto: Initialization complete\n");

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcoreid) {
		if (rte_eal_wait_lcore(lcoreid) < 0)
			return -1;
	}

	return 0;
}