examples: fix port mask parsing failure handling
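
    parse_portmask() now reports every invalid input (an empty string, a
    non-hexadecimal string, or trailing characters) as 0 instead of -1,
    and no longer special-cases an all-zero mask, which also simply
    yields 0, so a single zero check in the caller covers every failure
    mode. A minimal sketch of the caller-side handling this change
    assumes in parse_args() (the corresponding hunk is not part of this
    excerpt):

        case 'p':
                enabled_port_mask = parse_portmask(optarg);
                if (enabled_port_mask == 0) {
                        fprintf(stderr, "Invalid portmask\n");
                        print_usage(prgname);
                        return -1;
                }
                break;
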
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index ab67134..d3fcf41 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
 
 #include <rte_branch_prediction.h>
 #include <rte_common.h>
+#include <rte_cycles.h>
 #include <rte_eal.h>
 #include <rte_ethdev.h>
+#include <rte_graph_worker.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
 #include <rte_log.h>
 #include <rte_mempool.h>
+#include <rte_node_eth_api.h>
+#include <rte_node_ip4_api.h>
 #include <rte_per_lcore.h>
 #include <rte_string_fns.h>
 #include <rte_vect.h>
 
 #define NB_SOCKETS 8
 
+/* Static global variables used within this file. */
+static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+
 /**< Ports set in promiscuous mode off by default. */
 static int promiscuous_on;
 
@@ -60,6 +70,7 @@ static volatile bool force_quit;
 
 /* Ethernet addresses of ports */
 static uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
+static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
 xmm_t val_eth[RTE_MAX_ETHPORTS];
 
 /* Mask of enabled ports */
@@ -68,12 +79,17 @@ static uint32_t enabled_port_mask;
 struct lcore_rx_queue {
        uint16_t port_id;
        uint8_t queue_id;
+       char node_name[RTE_NODE_NAMESIZE];
 };
 
 /* Lcore conf */
 struct lcore_conf {
        uint16_t n_rx_queue;
        struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
+
+       struct rte_graph *graph;
+       char name[RTE_GRAPH_NAMESIZE];
+       rte_graph_t graph_id;
 } __rte_cache_aligned;
 
 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
@@ -110,6 +126,27 @@ static struct rte_eth_conf port_conf = {
        },
 };
 
+static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
+
+static struct rte_node_ethdev_config ethdev_conf[RTE_MAX_ETHPORTS];
+
+struct ipv4_l3fwd_lpm_route {
+       uint32_t ip;
+       uint8_t depth;
+       uint8_t if_out;
+};
+
+#define IPV4_L3FWD_LPM_NUM_ROUTES                                              \
+       (sizeof(ipv4_l3fwd_lpm_route_array) /                                  \
+        sizeof(ipv4_l3fwd_lpm_route_array[0]))
+/* 198.18.0.0/16 is set aside for RFC 2544 benchmarking. */
+static struct ipv4_l3fwd_lpm_route ipv4_l3fwd_lpm_route_array[] = {
+       {RTE_IPV4(198, 18, 0, 0), 24, 0}, {RTE_IPV4(198, 18, 1, 0), 24, 1},
+       {RTE_IPV4(198, 18, 2, 0), 24, 2}, {RTE_IPV4(198, 18, 3, 0), 24, 3},
+       {RTE_IPV4(198, 18, 4, 0), 24, 4}, {RTE_IPV4(198, 18, 5, 0), 24, 5},
+       {RTE_IPV4(198, 18, 6, 0), 24, 6}, {RTE_IPV4(198, 18, 7, 0), 24, 7},
+};
+
 static int
 check_lcore_params(void)
 {
@@ -165,6 +202,27 @@ check_port_config(void)
        return 0;
 }
 
+/* Number of RX queues configured for a port; queue ids must start at 0 and be sequential */
+static uint8_t
+get_port_n_rx_queues(const uint16_t port)
+{
+       int queue = -1;
+       uint16_t i;
+
+       for (i = 0; i < nb_lcore_params; ++i) {
+               if (lcore_params[i].port_id == port) {
+                       if (lcore_params[i].queue_id == queue + 1)
+                               queue = lcore_params[i].queue_id;
+                       else
+                               rte_exit(EXIT_FAILURE,
+                                        "Queue ids of the port %d must be"
+                                        " in sequence and must start with 0\n",
+                                        lcore_params[i].port_id);
+               }
+       }
+
+       return (uint8_t)(++queue);
+}
+
 static int
 init_lcore_rx_queues(void)
 {
@@ -244,10 +302,7 @@ parse_portmask(const char *portmask)
        /* Parse hexadecimal string */
        pm = strtoul(portmask, &end, 16);
        if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
-               return -1;
-
-       if (pm == 0)
-               return -1;
+               return 0;
 
        return pm;
 }
@@ -479,6 +534,128 @@ parse_args(int argc, char **argv)
        return ret;
 }
 
+static void
+print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
+{
+       char buf[RTE_ETHER_ADDR_FMT_SIZE];
+       rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
+       printf("%s%s", name, buf);
+}
+
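+/* Create the pktmbuf pool used for 'portid' on every socket that has an enabled lcore */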
+static int
+init_mem(uint16_t portid, uint32_t nb_mbuf)
+{
+       uint32_t lcore_id;
+       int socketid;
+       char s[64];
+
+       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+               if (rte_lcore_is_enabled(lcore_id) == 0)
+                       continue;
+
+               if (numa_on)
+                       socketid = rte_lcore_to_socket_id(lcore_id);
+               else
+                       socketid = 0;
+
+               if (socketid >= NB_SOCKETS) {
+                       rte_exit(EXIT_FAILURE,
+                                "Socket %d of lcore %u is out of range %d\n",
+                                socketid, lcore_id, NB_SOCKETS);
+               }
+
+               if (pktmbuf_pool[portid][socketid] == NULL) {
+                       snprintf(s, sizeof(s), "mbuf_pool_%d:%d", portid,
+                                socketid);
+                       /* Create a pool with priv size of a cacheline */
+                       pktmbuf_pool[portid][socketid] =
+                               rte_pktmbuf_pool_create(
+                                       s, nb_mbuf, MEMPOOL_CACHE_SIZE,
+                                       RTE_CACHE_LINE_SIZE,
+                                       RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
+                       if (pktmbuf_pool[portid][socketid] == NULL)
+                               rte_exit(EXIT_FAILURE,
+                                        "Cannot init mbuf pool on socket %d\n",
+                                        socketid);
+                       else
+                               printf("Allocated mbuf pool on socket %d\n",
+                                      socketid);
+               }
+       }
+
+       return 0;
+}
+
+/* Check the link status of all enabled ports, waiting up to 9s, and print the final status */
+static void
+check_all_ports_link_status(uint32_t port_mask)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90  /* 9s (90 * 100ms) in total */
+       uint8_t count, all_ports_up, print_flag = 0;
+       struct rte_eth_link link;
+       uint16_t portid;
+       int ret;
+
+       printf("\nChecking link status");
+       fflush(stdout);
+       for (count = 0; count <= MAX_CHECK_TIME; count++) {
+               if (force_quit)
+                       return;
+               all_ports_up = 1;
+               RTE_ETH_FOREACH_DEV(portid)
+               {
+                       if (force_quit)
+                               return;
+                       if ((port_mask & (1 << portid)) == 0)
+                               continue;
+                       memset(&link, 0, sizeof(link));
+                       ret = rte_eth_link_get_nowait(portid, &link);
+                       if (ret < 0) {
+                               all_ports_up = 0;
+                               if (print_flag == 1)
+                                       printf("Port %u link get failed: %s\n",
+                                               portid, rte_strerror(-ret));
+                               continue;
+                       }
+                       /* Print link status if flag set */
+                       if (print_flag == 1) {
+                               if (link.link_status)
+                                       printf("Port %d Link Up. Speed %u Mbps "
+                                              "- %s\n",
+                                              portid, link.link_speed,
+                                              (link.link_duplex ==
+                                               ETH_LINK_FULL_DUPLEX)
+                                                      ? ("full-duplex")
+                                                      : ("half-duplex"));
+                               else
+                                       printf("Port %d Link Down\n", portid);
+                               continue;
+                       }
+                       /* Clear all_ports_up flag if any link down */
+                       if (link.link_status == ETH_LINK_DOWN) {
+                               all_ports_up = 0;
+                               break;
+                       }
+               }
+               /* Once the final link status has been printed, get out */
+               if (print_flag == 1)
+                       break;
+
+               if (all_ports_up == 0) {
+                       printf(".");
+                       fflush(stdout);
+                       rte_delay_ms(CHECK_INTERVAL);
+               }
+
+               /* Set the print_flag if all ports up or timeout */
+               if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
+                       print_flag = 1;
+                       printf("Done\n");
+               }
+       }
+}
+
 static void
 signal_handler(int signum)
 {
@@ -489,10 +666,89 @@ signal_handler(int signum)
        }
 }
 
+static void
+print_stats(void)
+{
+       const char topLeft[] = {27, '[', '1', ';', '1', 'H', '\0'};
+       const char clr[] = {27, '[', '2', 'J', '\0'};
+       struct rte_graph_cluster_stats_param s_param;
+       struct rte_graph_cluster_stats *stats;
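+       /* Matches the per-lcore graphs, which main() names "worker_<lcore_id>" */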
+       const char *pattern = "worker_*";
+
+       /* Prepare stats object */
+       memset(&s_param, 0, sizeof(s_param));
+       s_param.f = stdout;
+       s_param.socket_id = SOCKET_ID_ANY;
+       s_param.graph_patterns = &pattern;
+       s_param.nb_graph_patterns = 1;
+
+       stats = rte_graph_cluster_stats_create(&s_param);
+       if (stats == NULL)
+               rte_exit(EXIT_FAILURE, "Unable to create stats object\n");
+
+       while (!force_quit) {
+               /* Clear screen and move to top left */
+               printf("%s%s", clr, topLeft);
+               rte_graph_cluster_stats_get(stats, 0);
+               rte_delay_ms(1E3);
+       }
+
+       rte_graph_cluster_stats_destroy(stats);
+}
+
+/* Main processing loop */
+static int
+graph_main_loop(void *conf)
+{
+       struct lcore_conf *qconf;
+       struct rte_graph *graph;
+       uint32_t lcore_id;
+
+       RTE_SET_USED(conf);
+
+       lcore_id = rte_lcore_id();
+       qconf = &lcore_conf[lcore_id];
+       graph = qconf->graph;
+
+       if (!graph) {
+               RTE_LOG(INFO, L3FWD_GRAPH, "Lcore %u has nothing to do\n",
+                       lcore_id);
+               return 0;
+       }
+
+       RTE_LOG(INFO, L3FWD_GRAPH,
+               "Entering main loop on lcore %u, graph %s(%p)\n", lcore_id,
+               qconf->name, graph);
+
+       while (likely(!force_quit))
+               rte_graph_walk(graph);
+
+       return 0;
+}
+
 int
 main(int argc, char **argv)
 {
-       uint16_t portid;
+       /* Rewrite data: space for the destination and source Ethernet addresses */
+       uint8_t rewrite_data[2 * sizeof(struct rte_ether_addr)];
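+       /* Node patterns common to every graph; the per-queue "ethdev_rx-*"
+        * source nodes are appended per lcore later.
+        */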
+       static const char * const default_patterns[] = {
+               "ip4*",
+               "ethdev_tx-*",
+               "pkt_drop",
+       };
+       uint8_t nb_rx_queue, queue, socketid;
+       struct rte_graph_param graph_conf;
+       struct rte_eth_dev_info dev_info;
+       uint32_t nb_ports, nb_conf = 0;
+       uint32_t n_tx_queue, nb_lcores;
+       struct rte_eth_txconf *txconf;
+       uint16_t queueid, portid, i;
+       const char **node_patterns;
+       struct lcore_conf *qconf;
+       uint16_t nb_graphs = 0;
+       uint16_t nb_patterns;
+       uint8_t rewrite_len;
+       uint32_t lcore_id;
        int ret;
 
        /* Init EAL */
@@ -528,6 +784,347 @@ main(int argc, char **argv)
        if (check_port_config() < 0)
                rte_exit(EXIT_FAILURE, "check_port_config() failed\n");
 
+       nb_ports = rte_eth_dev_count_avail();
+       nb_lcores = rte_lcore_count();
+
+       /* Initialize all ports */
+       RTE_ETH_FOREACH_DEV(portid)
+       {
+               struct rte_eth_conf local_port_conf = port_conf;
+
+               /* Skip ports that are not enabled */
+               if ((enabled_port_mask & (1 << portid)) == 0) {
+                       printf("\nSkipping disabled port %d\n", portid);
+                       continue;
+               }
+
+               /* Init port */
+               printf("Initializing port %d ... ", portid);
+               fflush(stdout);
+
+               nb_rx_queue = get_port_n_rx_queues(portid);
+               n_tx_queue = nb_lcores;
+               if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
+                       n_tx_queue = MAX_TX_QUEUE_PER_PORT;
+               printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
+                      nb_rx_queue, n_tx_queue);
+
+               rte_eth_dev_info_get(portid, &dev_info);
+               if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+                       local_port_conf.txmode.offloads |=
+                               DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+               local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+                       dev_info.flow_type_rss_offloads;
+               if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+                   port_conf.rx_adv_conf.rss_conf.rss_hf) {
+                       printf("Port %u modified RSS hash function based on "
+                              "hardware support, "
+                              "requested:%#" PRIx64 " configured:%#" PRIx64
+                              "\n",
+                              portid, port_conf.rx_adv_conf.rss_conf.rss_hf,
+                              local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+               }
+
+               ret = rte_eth_dev_configure(portid, nb_rx_queue,
+                                           n_tx_queue, &local_port_conf);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE,
+                                "Cannot configure device: err=%d, port=%d\n",
+                                ret, portid);
+
+               ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+                                                      &nb_txd);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE,
+                                "Cannot adjust number of descriptors: err=%d, "
+                                "port=%d\n",
+                                ret, portid);
+
+               rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
+               print_ethaddr(" Address:", &ports_eth_addr[portid]);
+               printf(", ");
+               print_ethaddr(
+                       "Destination:",
+                       (const struct rte_ether_addr *)&dest_eth_addr[portid]);
+               printf(", ");
+
+               /*
+                * Prepare the source MAC for each port: store the port's own
+                * address in the second half of val_eth[portid].
+                */
+               rte_ether_addr_copy(
+                       &ports_eth_addr[portid],
+                       (struct rte_ether_addr *)(val_eth + portid) + 1);
+
+               /* Init memory */
+               if (!per_port_pool) {
+                       /* A port id of 0 here does not mean the first port;
+                        * it means the port id is ignored and one pool is
+                        * shared by all ports.
+                        */
+                       ret = init_mem(0, NB_MBUF(nb_ports));
+               } else {
+                       ret = init_mem(portid, NB_MBUF(1));
+               }
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "init_mem() failed\n");
+
+               /* Init one TX queue per (lcore, port) pair */
+               queueid = 0;
+               for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+                       if (rte_lcore_is_enabled(lcore_id) == 0)
+                               continue;
+
+                       qconf = &lcore_conf[lcore_id];
+
+                       if (numa_on)
+                               socketid = (uint8_t)rte_lcore_to_socket_id(
+                                       lcore_id);
+                       else
+                               socketid = 0;
+
+                       printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
+                       fflush(stdout);
+
+                       txconf = &dev_info.default_txconf;
+                       txconf->offloads = local_port_conf.txmode.offloads;
+                       ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
+                                                    socketid, txconf);
+                       if (ret < 0)
+                               rte_exit(EXIT_FAILURE,
+                                        "rte_eth_tx_queue_setup: err=%d, "
+                                        "port=%d\n",
+                                        ret, portid);
+                       queueid++;
+               }
+
+               /* Setup ethdev node config */
+               ethdev_conf[nb_conf].port_id = portid;
+               ethdev_conf[nb_conf].num_rx_queues = nb_rx_queue;
+               ethdev_conf[nb_conf].num_tx_queues = n_tx_queue;
+               if (!per_port_pool)
+                       ethdev_conf[nb_conf].mp = pktmbuf_pool[0];
+               else
+                       ethdev_conf[nb_conf].mp = pktmbuf_pool[portid];
+               ethdev_conf[nb_conf].mp_count = NB_SOCKETS;
+
+               nb_conf++;
+               printf("\n");
+       }
+
+       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+               if (rte_lcore_is_enabled(lcore_id) == 0)
+                       continue;
+               qconf = &lcore_conf[lcore_id];
+               printf("\nInitializing rx queues on lcore %u ... ", lcore_id);
+               fflush(stdout);
+               /* Init RX queues */
+               for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
+                       struct rte_eth_rxconf rxq_conf;
+
+                       portid = qconf->rx_queue_list[queue].port_id;
+                       queueid = qconf->rx_queue_list[queue].queue_id;
+
+                       if (numa_on)
+                               socketid = (uint8_t)rte_lcore_to_socket_id(
+                                       lcore_id);
+                       else
+                               socketid = 0;
+
+                       printf("rxq=%d,%d,%d ", portid, queueid, socketid);
+                       fflush(stdout);
+
+                       rte_eth_dev_info_get(portid, &dev_info);
+                       rxq_conf = dev_info.default_rxconf;
+                       rxq_conf.offloads = port_conf.rxmode.offloads;
+                       if (!per_port_pool)
+                               ret = rte_eth_rx_queue_setup(
+                                       portid, queueid, nb_rxd, socketid,
+                                       &rxq_conf, pktmbuf_pool[0][socketid]);
+                       else
+                               ret = rte_eth_rx_queue_setup(
+                                       portid, queueid, nb_rxd, socketid,
+                                       &rxq_conf,
+                                       pktmbuf_pool[portid][socketid]);
+                       if (ret < 0)
+                               rte_exit(EXIT_FAILURE,
+                                        "rte_eth_rx_queue_setup: err=%d, "
+                                        "port=%d\n",
+                                        ret, portid);
+
+                       /* Add this queue node to its graph */
+                       snprintf(qconf->rx_queue_list[queue].node_name,
+                                RTE_NODE_NAMESIZE, "ethdev_rx-%u-%u", portid,
+                                queueid);
+               }
+
+               /* Allocate a graph for this lcore only if it has a source (RX queue) */
+               if (qconf->n_rx_queue)
+                       nb_graphs++;
+       }
+
+       printf("\n");
+
+       /* Ethdev node config, skip rx queue mapping */
+       ret = rte_node_eth_config(ethdev_conf, nb_conf, nb_graphs);
+       if (ret)
+               rte_exit(EXIT_FAILURE, "rte_node_eth_config: err=%d\n", ret);
+
+       /* Start ports */
+       RTE_ETH_FOREACH_DEV(portid)
+       {
+               if ((enabled_port_mask & (1 << portid)) == 0)
+                       continue;
+
+               /* Start device */
+               ret = rte_eth_dev_start(portid);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE,
+                                "rte_eth_dev_start: err=%d, port=%d\n", ret,
+                                portid);
+
+               /*
+                * If enabled, put device in promiscuous mode.
+                * This allows IO forwarding mode to forward packets
+                * to itself through 2 cross-connected ports of the
+                * target machine.
+                */
+               if (promiscuous_on)
+                       rte_eth_promiscuous_enable(portid);
+       }
+
+       printf("\n");
+
+       check_all_ports_link_status(enabled_port_mask);
+
+       /* Graph Initialization */
+       nb_patterns = RTE_DIM(default_patterns);
+       node_patterns = malloc((MAX_RX_QUEUE_PER_LCORE + nb_patterns) *
+                              sizeof(*node_patterns));
+       if (!node_patterns)
+               return -ENOMEM;
+       memcpy(node_patterns, default_patterns,
+              nb_patterns * sizeof(*node_patterns));
+
+       memset(&graph_conf, 0, sizeof(graph_conf));
+       graph_conf.node_patterns = node_patterns;
+
+       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+               rte_graph_t graph_id;
+               rte_edge_t i;
+
+               if (rte_lcore_is_enabled(lcore_id) == 0)
+                       continue;
+
+               qconf = &lcore_conf[lcore_id];
+
+               /* Skip graph creation if no source exists */
+               if (!qconf->n_rx_queue)
+                       continue;
+
+               /* Add rx node patterns of this lcore */
+               for (i = 0; i < qconf->n_rx_queue; i++) {
+                       graph_conf.node_patterns[nb_patterns + i] =
+                               qconf->rx_queue_list[i].node_name;
+               }
+
+               graph_conf.nb_node_patterns = nb_patterns + i;
+               graph_conf.socket_id = rte_lcore_to_socket_id(lcore_id);
+
+               snprintf(qconf->name, sizeof(qconf->name), "worker_%u",
+                        lcore_id);
+
+               graph_id = rte_graph_create(qconf->name, &graph_conf);
+               if (graph_id == RTE_GRAPH_ID_INVALID)
+                       rte_exit(EXIT_FAILURE,
+                                "rte_graph_create(): graph_id invalid"
+                                " for lcore %u\n", lcore_id);
+
+               qconf->graph_id = graph_id;
+               qconf->graph = rte_graph_lookup(qconf->name);
+               if (!qconf->graph)
+                       rte_exit(EXIT_FAILURE,
+                                "rte_graph_lookup(): graph %s not found\n",
+                                qconf->name);
+       }
+
+       memset(&rewrite_data, 0, sizeof(rewrite_data));
+       rewrite_len = sizeof(rewrite_data);
+
+       /* Add route to ip4 graph infra */
+       for (i = 0; i < IPV4_L3FWD_LPM_NUM_ROUTES; i++) {
+               char route_str[INET6_ADDRSTRLEN * 4];
+               char abuf[INET6_ADDRSTRLEN];
+               struct in_addr in;
+               uint32_t dst_port;
+
+               /* Skip unused ports */
+               if ((1 << ipv4_l3fwd_lpm_route_array[i].if_out &
+                    enabled_port_mask) == 0)
+                       continue;
+
+               dst_port = ipv4_l3fwd_lpm_route_array[i].if_out;
+
+               in.s_addr = htonl(ipv4_l3fwd_lpm_route_array[i].ip);
+               snprintf(route_str, sizeof(route_str), "%s / %d (%d)",
+                        inet_ntop(AF_INET, &in, abuf, sizeof(abuf)),
+                        ipv4_l3fwd_lpm_route_array[i].depth,
+                        ipv4_l3fwd_lpm_route_array[i].if_out);
+
+               /* Use route index 'i' as next hop id */
+               ret = rte_node_ip4_route_add(
+                       ipv4_l3fwd_lpm_route_array[i].ip,
+                       ipv4_l3fwd_lpm_route_array[i].depth, i,
+                       RTE_NODE_IP4_LOOKUP_NEXT_REWRITE);
+
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE,
+                                "Unable to add ip4 route %s to graph\n",
+                                route_str);
+
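+               /* Use the dst+src MAC pair of the output port, kept in
+                * val_eth[], as the rewrite data for this next hop.
+                */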
+               memcpy(rewrite_data, val_eth + dst_port, rewrite_len);
+
+               /* Add next hop rewrite data for id 'i' */
+               ret = rte_node_ip4_rewrite_add(i, rewrite_data,
+                                              rewrite_len, dst_port);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE,
+                                "Unable to add next hop %u for "
+                                "route %s\n", i, route_str);
+
+               RTE_LOG(INFO, L3FWD_GRAPH, "Added route %s, next_hop %u\n",
+                       route_str, i);
+       }
+
+       /* Launch graph_main_loop() on every slave lcore */
+       rte_eal_mp_remote_launch(graph_main_loop, NULL, SKIP_MASTER);
+
+       /* Accumulate and print stats on master until exit */
+       if (rte_graph_has_stats_feature())
+               print_stats();
+
+       /* Wait for slave cores to exit */
+       ret = 0;
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               ret = rte_eal_wait_lcore(lcore_id);
+               /* Destroy graph */
+               if (ret < 0 || rte_graph_destroy(
+                       rte_graph_from_name(lcore_conf[lcore_id].name))) {
+                       ret = -1;
+                       break;
+               }
+       }
+       free(node_patterns);
+
+       /* Stop ports */
+       RTE_ETH_FOREACH_DEV(portid) {
+               if ((enabled_port_mask & (1 << portid)) == 0)
+                       continue;
+               printf("Closing port %d...", portid);
+               rte_eth_dev_stop(portid);
+               rte_eth_dev_close(portid);
+               printf(" Done\n");
+       }
        printf("Bye...\n");
 
        return ret;