examples: use new API to create control threads
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index e706d88..1659ef3 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
  */
 
 #include <arpa/inet.h>
 #include <rte_log.h>
 #include <rte_string_fns.h>
 #include <rte_malloc.h>
-#include <rte_virtio_net.h>
+#include <rte_vhost.h>
 #include <rte_ip.h>
 #include <rte_tcp.h>
+#include <rte_pause.h>
 
 #include "main.h"
 
 /* the maximum number of external ports supported */
 #define MAX_SUP_PORTS 1
 
-/*
- * Calculate the number of buffers needed per port
- */
-#define NUM_MBUFS_PER_PORT ((MAX_QUEUES*RTE_TEST_RX_DESC_DEFAULT) +            \
-                                                       (num_switching_cores*MAX_PKT_BURST) +                   \
-                                                       (num_switching_cores*RTE_TEST_TX_DESC_DEFAULT) +\
-                                                       ((num_switching_cores+1)*MBUF_CACHE_SIZE))
-
 #define MBUF_CACHE_SIZE        128
 #define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
 
-#define MAX_PKT_BURST 32               /* Max burst size for RX/TX */
 #define BURST_TX_DRAIN_US 100  /* TX drain every ~100us */
 
 #define BURST_RX_WAIT_US 15    /* Defines how long we wait between retries on RX */
@@ -98,9 +61,6 @@
 /* Size of buffers used for snprintfs. */
 #define MAX_PRINT_BUFF 6072
 
-/* Maximum character device basename size. */
-#define MAX_BASENAME_SZ 10
-
 /* Maximum long option length for option parsing. */
 #define MAX_LONG_OPT_SZ 64
 
@@ -110,9 +70,6 @@ static uint32_t enabled_port_mask = 0;
 /* Promiscuous mode */
 static uint32_t promiscuous;
 
-/*Number of switching cores enabled*/
-static uint32_t num_switching_cores = 0;
-
 /* number of devices/queues to support*/
 static uint32_t num_queues = 0;
 static uint32_t num_devices;
@@ -120,9 +77,6 @@ static uint32_t num_devices;
 static struct rte_mempool *mbuf_pool;
 static int mergeable;
 
-/* Do vlan strip on host, enabled on default */
-static uint32_t vlan_strip = 1;
-
 /* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
 typedef enum {
        VM2VM_DISABLED = 0,
@@ -143,34 +97,42 @@ static uint32_t enable_tx_csum;
 /* Disable TSO offload */
 static uint32_t enable_tso;
 
+static int client_mode;
+static int dequeue_zero_copy;
+
+static int builtin_net_driver;
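+/* The three flags above are set by the --client, --dequeue-zero-copy and
+ * --builtin-net-driver long options; see us_vhost_parse_args(). */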
+
 /* Specify timeout (in microseconds) between retries on RX. */
 static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
 /* Specify the number of retries on RX. */
 static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
 
-/* Character device basename. Can be set by user. */
-static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";
+/* Socket file paths. Can be set by user */
+static char *socket_files;
+static int nb_sockets;
 
 /* empty vmdq configuration structure. Filled in programmatically */
 static struct rte_eth_conf vmdq_conf_default = {
        .rxmode = {
                .mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
                .split_hdr_size = 0,
-               .header_split   = 0, /**< Header Split disabled */
-               .hw_ip_checksum = 0, /**< IP checksum offload disabled */
-               .hw_vlan_filter = 0, /**< VLAN filtering disabled */
+               .ignore_offload_bitfield = 1,
                /*
-                * It is necessary for 1G NIC such as I350,
+                * VLAN strip is necessary for 1G NIC such as I350,
                 * this fixes a bug where ipv4 forwarding in the guest can't
                 * forward packets from one virtio dev to another virtio dev.
                 */
-               .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
-               .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
-               .hw_strip_crc   = 0, /**< CRC stripped by hardware */
+               .offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
+                            DEV_RX_OFFLOAD_VLAN_STRIP),
        },
 
        .txmode = {
                .mq_mode = ETH_MQ_TX_NONE,
+               .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
+                            DEV_TX_OFFLOAD_TCP_CKSUM |
+                            DEV_TX_OFFLOAD_VLAN_INSERT |
+                            DEV_TX_OFFLOAD_MULTI_SEGS |
+                            DEV_TX_OFFLOAD_TCP_TSO),
        },
        .rx_adv_conf = {
                /*
@@ -187,8 +149,9 @@ static struct rte_eth_conf vmdq_conf_default = {
        },
 };
 
 static unsigned lcore_ids[RTE_MAX_LCORE];
-static uint8_t ports[RTE_MAX_ETHPORTS];
+static uint16_t ports[RTE_MAX_ETHPORTS];
 static unsigned num_ports = 0; /**< The number of ports specified in command line */
 static uint16_t num_pf_queues, num_vmdq_queues;
 static uint16_t vmdq_pool_base, vmdq_queue_base;
@@ -223,17 +186,10 @@ struct mbuf_table {
 /* TX queue for each data core. */
 struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
 
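+/* Number of TSC cycles in the ~100us BURST_TX_DRAIN_US interval, rounded up. */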
+#define MBUF_TABLE_DRAIN_TSC   ((rte_get_tsc_hz() + US_PER_S - 1) \
+                                / US_PER_S * BURST_TX_DRAIN_US)
 #define VLAN_HLEN       4
 
-/* Per-device statistics struct */
-struct device_statistics {
-       uint64_t tx_total;
-       rte_atomic64_t rx_total_atomic;
-       uint64_t tx;
-       rte_atomic64_t rx_atomic;
-} __rte_cache_aligned;
-struct device_statistics dev_statistics[MAX_DEVICES];
-
 /*
  * Builds up the correct configuration for VMDQ VLAN pool map
  * according to the pool & queue limits.
@@ -283,7 +239,7 @@ validate_num_devices(uint32_t max_nb_devices)
  * coming from the mbuf_pool passed as parameter
  */
 static inline int
-port_init(uint8_t port)
+port_init(uint16_t port)
 {
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf port_conf;
@@ -297,24 +253,27 @@ port_init(uint8_t port)
        /* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
        rte_eth_dev_info_get (port, &dev_info);
 
-       if (dev_info.max_rx_queues > MAX_QUEUES) {
-               rte_exit(EXIT_FAILURE,
-                       "please define MAX_QUEUES no less than %u in %s\n",
-                       dev_info.max_rx_queues, __FILE__);
-       }
-
        rxconf = &dev_info.default_rxconf;
        txconf = &dev_info.default_txconf;
        rxconf->rx_drop_en = 1;
-
-       /* Enable vlan offload */
-       txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;
+       txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
 
        /*configure the number of supported virtio devices based on VMDQ limits */
        num_devices = dev_info.max_vmdq_pools;
 
        rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
        tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
+
+       /*
+        * When dequeue zero copy is enabled, the guest Tx used vring will be
+        * updated only when the corresponding mbuf is freed. Thus, the
+        * nb_tx_desc (tx_ring_size here) must be small enough so that the
+        * driver hits the free threshold easily and frees mbufs in a timely
+        * manner. Otherwise, the guest Tx vring would be starved.
+        */
+       if (dequeue_zero_copy)
+               tx_ring_size = 64;
+
        tx_rings = (uint16_t)rte_lcore_count();
 
        retval = validate_num_devices(MAX_DEVICES);
@@ -335,43 +294,66 @@ port_init(uint8_t port)
        printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
                num_pf_queues, num_devices, queues_per_pool);
 
-       if (port >= rte_eth_dev_count()) return -1;
-
-       if (enable_tx_csum == 0)
-               rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_CSUM);
-
-       if (enable_tso == 0) {
-               rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4);
-               rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO6);
-       }
+       if (!rte_eth_dev_is_valid_port(port))
+               return -1;
 
        rx_rings = (uint16_t)dev_info.max_rx_queues;
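+       /*
+        * Fast free lets the PMD release Tx mbufs in bulk, assuming they all
+        * come from the same pool with a reference count of 1.
+        */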
+       if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+               port_conf.txmode.offloads |=
+                       DEV_TX_OFFLOAD_MBUF_FAST_FREE;
        /* Configure ethernet device. */
        retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
-       if (retval != 0)
+       if (retval != 0) {
+               RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
+                       port, strerror(-retval));
                return retval;
+       }
+
+       retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
+               &tx_ring_size);
+       if (retval != 0) {
+               RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
+                       "for port %u: %s.\n", port, strerror(-retval));
+               return retval;
+       }
+       if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
+               RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
+                       "for Rx queues on port %u.\n", port);
+               return -1;
+       }
 
        /* Setup the queues. */
+       rxconf->offloads = port_conf.rxmode.offloads;
        for (q = 0; q < rx_rings; q ++) {
                retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
                                                rte_eth_dev_socket_id(port),
                                                rxconf,
                                                mbuf_pool);
-               if (retval < 0)
+               if (retval < 0) {
+                       RTE_LOG(ERR, VHOST_PORT,
+                               "Failed to setup rx queue %u of port %u: %s.\n",
+                               q, port, strerror(-retval));
                        return retval;
+               }
        }
+       txconf->offloads = port_conf.txmode.offloads;
        for (q = 0; q < tx_rings; q ++) {
                retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
                                                rte_eth_dev_socket_id(port),
                                                txconf);
-               if (retval < 0)
+               if (retval < 0) {
+                       RTE_LOG(ERR, VHOST_PORT,
+                               "Failed to setup tx queue %u of port %u: %s.\n",
+                               q, port, strerror(-retval));
                        return retval;
+               }
        }
 
        /* Start the device. */
        retval  = rte_eth_dev_start(port);
        if (retval < 0) {
-               RTE_LOG(ERR, VHOST_DATA, "Failed to start the device.\n");
+               RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
+                       port, strerror(-retval));
                return retval;
        }
 
@@ -382,7 +364,7 @@ port_init(uint8_t port)
        RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
        RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
                        " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
-                       (unsigned)port,
+                       port,
                        vmdq_ports_eth_addr[port].addr_bytes[0],
                        vmdq_ports_eth_addr[port].addr_bytes[1],
                        vmdq_ports_eth_addr[port].addr_bytes[2],
@@ -394,17 +376,18 @@ port_init(uint8_t port)
 }
 
 /*
- * Set character device basename.
+ * Set socket file path.
  */
 static int
-us_vhost_parse_basename(const char *q_arg)
+us_vhost_parse_socket_path(const char *q_arg)
 {
        /* parse number string */
-
-       if (strnlen(q_arg, MAX_BASENAME_SZ) > MAX_BASENAME_SZ)
+       if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
                return -1;
-       else
-               snprintf((char*)&dev_basename, MAX_BASENAME_SZ, "%s", q_arg);
+
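+       /* Grow the flat array by one PATH_MAX-sized slot and append the path. */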
+       socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
+       snprintf(socket_files + nb_sockets * PATH_MAX, PATH_MAX, "%s", q_arg);
+       nb_sockets++;
 
        return 0;
 }
@@ -464,7 +447,7 @@ us_vhost_usage(const char *prgname)
        RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
        "               --vm2vm [0|1|2]\n"
        "               --rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
-       "               --dev-basename <name>\n"
+       "               --socket-file <path>\n"
        "               --nb-devices ND\n"
        "               -p PORTMASK: Set mask for ports to be used by application\n"
        "               --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
@@ -472,11 +455,12 @@ us_vhost_usage(const char *prgname)
        "               --rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\n"
        "               --rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\n"
        "               --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
-       "               --vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n"
        "               --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
-       "               --dev-basename: The basename to be used for the character device.\n"
+       "               --socket-file: The path of the socket file.\n"
        "               --tx-csum [0|1] disable/enable TX checksum offload.\n"
-       "               --tso [0|1] disable/enable TCP segment offload.\n",
+       "               --tso [0|1] disable/enable TCP segment offload.\n"
+       "               --client register a vhost-user socket as client mode.\n"
+       "               --dequeue-zero-copy enables dequeue zero copy\n",
               prgname);
 }
 
@@ -496,11 +480,13 @@ us_vhost_parse_args(int argc, char **argv)
                {"rx-retry-delay", required_argument, NULL, 0},
                {"rx-retry-num", required_argument, NULL, 0},
                {"mergeable", required_argument, NULL, 0},
-               {"vlan-strip", required_argument, NULL, 0},
                {"stats", required_argument, NULL, 0},
-               {"dev-basename", required_argument, NULL, 0},
+               {"socket-file", required_argument, NULL, 0},
                {"tx-csum", required_argument, NULL, 0},
                {"tso", required_argument, NULL, 0},
+               {"client", no_argument, &client_mode, 1},
+               {"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
+               {"builtin-net-driver", no_argument, &builtin_net_driver, 1},
                {NULL, 0, 0, 0},
        };
 
@@ -523,7 +509,6 @@ us_vhost_parse_args(int argc, char **argv)
                        vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
                                ETH_VMDQ_ACCEPT_BROADCAST |
                                ETH_VMDQ_ACCEPT_MULTICAST;
-                       rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX);
 
                        break;
 
@@ -611,34 +596,20 @@ us_vhost_parse_args(int argc, char **argv)
                                } else {
                                        mergeable = !!ret;
                                        if (ret) {
-                                               vmdq_conf_default.rxmode.jumbo_frame = 1;
+                                               vmdq_conf_default.rxmode.offloads |=
+                                                       DEV_RX_OFFLOAD_JUMBO_FRAME;
                                                vmdq_conf_default.rxmode.max_rx_pkt_len
                                                        = JUMBO_FRAME_MAX_SIZE;
                                        }
                                }
                        }
 
-                       /* Enable/disable RX VLAN strip on host. */
-                       if (!strncmp(long_option[option_index].name,
-                               "vlan-strip", MAX_LONG_OPT_SZ)) {
-                               ret = parse_num_opt(optarg, 1);
-                               if (ret == -1) {
-                                       RTE_LOG(INFO, VHOST_CONFIG,
-                                               "Invalid argument for VLAN strip [0|1]\n");
-                                       us_vhost_usage(prgname);
-                                       return -1;
-                               } else {
-                                       vlan_strip = !!ret;
-                                       vmdq_conf_default.rxmode.hw_vlan_strip =
-                                               vlan_strip;
-                               }
-                       }
-
                        /* Enable/disable stats. */
                        if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
                                ret = parse_num_opt(optarg, INT32_MAX);
                                if (ret == -1) {
-                                       RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
+                                       RTE_LOG(INFO, VHOST_CONFIG,
+                                               "Invalid argument for stats [0..N]\n");
                                        us_vhost_usage(prgname);
                                        return -1;
                                } else {
@@ -646,10 +617,13 @@ us_vhost_parse_args(int argc, char **argv)
                                }
                        }
 
-                       /* Set character device basename. */
-                       if (!strncmp(long_option[option_index].name, "dev-basename", MAX_LONG_OPT_SZ)) {
-                               if (us_vhost_parse_basename(optarg) == -1) {
-                                       RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for character device basename (Max %d characters)\n", MAX_BASENAME_SZ);
+                       /* Set socket file path. */
+                       if (!strncmp(long_option[option_index].name,
+                                               "socket-file", MAX_LONG_OPT_SZ)) {
+                               if (us_vhost_parse_socket_path(optarg) == -1) {
+                                       RTE_LOG(INFO, VHOST_CONFIG,
+                                       "Invalid argument for socket name (Max %d characters)\n",
+                                       PATH_MAX);
                                        us_vhost_usage(prgname);
                                        return -1;
                                }
@@ -666,7 +640,7 @@ us_vhost_parse_args(int argc, char **argv)
 
        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (enabled_port_mask & (1 << i))
-                       ports[num_ports++] = (uint8_t)i;
+                       ports[num_ports++] = i;
        }
 
        if ((num_ports ==  0) || (num_ports > MAX_SUP_PORTS)) {
@@ -694,9 +668,10 @@ static unsigned check_ports_num(unsigned nb_ports)
        }
 
        for (portid = 0; portid < num_ports; portid ++) {
-               if (ports[portid] >= nb_ports) {
-                       RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
-                               ports[portid], (nb_ports - 1));
+               if (!rte_eth_dev_is_valid_port(ports[portid])) {
+                       RTE_LOG(INFO, VHOST_PORT,
+                               "\nSpecified port ID(%u) is not valid\n",
+                               ports[portid]);
                        ports[portid] = INVALID_PORT_ID;
                        valid_num_ports--;
                }
@@ -704,12 +679,12 @@ static unsigned check_ports_num(unsigned nb_ports)
        return valid_num_ports;
 }
 
-static inline struct vhost_dev *__attribute__((always_inline))
+static __rte_always_inline struct vhost_dev *
 find_vhost_dev(struct ether_addr *mac)
 {
        struct vhost_dev *vdev;
 
-       TAILQ_FOREACH(vdev, &vhost_dev_list, next) {
+       TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
                if (vdev->ready == DEVICE_RX &&
                    is_same_ether_addr(mac, &vdev->mac_address))
                        return vdev;
@@ -726,7 +701,6 @@ static int
 link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
 {
        struct ether_hdr *pkt_hdr;
-       struct virtio_net *dev = vdev->dev;
        int i, ret;
 
        /* Learn MAC address of guest device from packet */
@@ -734,8 +708,8 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
 
        if (find_vhost_dev(&pkt_hdr->s_addr)) {
                RTE_LOG(ERR, VHOST_DATA,
-                       "Device (%" PRIu64 ") is using a registered MAC!\n",
-                       dev->device_fh);
+                       "(%d) device is using a registered MAC!\n",
+                       vdev->vid);
                return -1;
        }
 
@@ -743,11 +717,12 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
                vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];
 
        /* vlan_tag currently uses the device_id. */
-       vdev->vlan_tag = vlan_tags[dev->device_fh];
+       vdev->vlan_tag = vlan_tags[vdev->vid];
 
        /* Print out VMDQ registration info. */
-       RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\n",
-               dev->device_fh,
+       RTE_LOG(INFO, VHOST_DATA,
+               "(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
+               vdev->vid,
                vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
                vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
                vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
@@ -755,15 +730,13 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
 
        /* Register the MAC address. */
        ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
-                               (uint32_t)dev->device_fh + vmdq_pool_base);
+                               (uint32_t)vdev->vid + vmdq_pool_base);
        if (ret)
-               RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",
-                                       dev->device_fh);
+               RTE_LOG(ERR, VHOST_DATA,
+                       "(%d) failed to add device MAC address to VMDQ\n",
+                       vdev->vid);
 
-       /* Enable stripping of the vlan tag as we handle routing. */
-       if (vlan_strip)
-               rte_eth_dev_set_vlan_strip_on_queue(ports[0],
-                       (uint16_t)vdev->vmdq_rx_q, 1);
+       rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);
 
        /* Set device as ready for RX. */
        vdev->ready = DEVICE_RX;
@@ -806,17 +779,35 @@ unlink_vmdq(struct vhost_dev *vdev)
        }
 }
 
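+/*
+ * Enqueue one mbuf to dst_vdev's Rx ring, either via the example's builtin
+ * virtio-net driver or via the vhost library, and update the stats.
+ */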
+static __rte_always_inline void
+virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
+           struct rte_mbuf *m)
+{
+       uint16_t ret;
+
+       if (builtin_net_driver) {
+               ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
+       } else {
+               ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
+       }
+
+       if (enable_stats) {
+               rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
+               rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
+               src_vdev->stats.tx_total++;
+               src_vdev->stats.tx += ret;
+       }
+}
+
 /*
  * Check if the packet destination MAC address is for a local device. If so then put
  * the packet on that devices RX queue. If not then return.
  */
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
 virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
 {
        struct ether_hdr *pkt_hdr;
-       uint64_t ret = 0;
        struct vhost_dev *dst_vdev;
-       uint64_t fh;
 
        pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
 
@@ -824,32 +815,23 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
        if (!dst_vdev)
                return -1;
 
-       fh = dst_vdev->dev->device_fh;
-       if (fh == vdev->dev->device_fh) {
-               RTE_LOG(DEBUG, VHOST_DATA,
-                       "(%" PRIu64 ") TX: src and dst MAC is same. "
-                       "Dropping packet.\n", fh);
+       if (vdev->vid == dst_vdev->vid) {
+               RTE_LOG_DP(DEBUG, VHOST_DATA,
+                       "(%d) TX: src and dst MAC is same. Dropping packet.\n",
+                       vdev->vid);
                return 0;
        }
 
-       RTE_LOG(DEBUG, VHOST_DATA,
-               "(%" PRIu64 ") TX: MAC address is local\n", fh);
+       RTE_LOG_DP(DEBUG, VHOST_DATA,
+               "(%d) TX: MAC address is local\n", dst_vdev->vid);
 
        if (unlikely(dst_vdev->remove)) {
-               RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") "
-                       "Device is marked for removal\n", fh);
+               RTE_LOG_DP(DEBUG, VHOST_DATA,
+                       "(%d) device is marked for removal\n", dst_vdev->vid);
                return 0;
        }
 
-       /* send the packet to the local virtio device */
-       ret = rte_vhost_enqueue_burst(dst_vdev->dev, VIRTIO_RXQ, &m, 1);
-       if (enable_stats) {
-               rte_atomic64_inc(&dev_statistics[fh].rx_total_atomic);
-               rte_atomic64_add(&dev_statistics[fh].rx_atomic, ret);
-               dev_statistics[vdev->dev->device_fh].tx_total++;
-               dev_statistics[vdev->dev->device_fh].tx += ret;
-       }
-
+       virtio_xmit(dst_vdev, vdev, m);
        return 0;
 }
 
@@ -857,8 +839,8 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
  * Check if the destination MAC of a packet is one local VM,
  * and get its vlan tag, and offset if it is.
  */
-static inline int __attribute__((always_inline))
-find_local_dest(struct virtio_net *dev, struct rte_mbuf *m,
+static __rte_always_inline int
+find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
        uint32_t *offset, uint16_t *vlan_tag)
 {
        struct vhost_dev *dst_vdev;
@@ -868,10 +850,10 @@ find_local_dest(struct virtio_net *dev, struct rte_mbuf *m,
        if (!dst_vdev)
                return 0;
 
-       if (dst_vdev->dev->device_fh == dev->device_fh) {
-               RTE_LOG(DEBUG, VHOST_DATA,
-                       "(%" PRIu64 ") TX: src and dst MAC is same. "
-                       " Dropping packet.\n", dst_vdev->dev->device_fh);
+       if (vdev->vid == dst_vdev->vid) {
+               RTE_LOG_DP(DEBUG, VHOST_DATA,
+                       "(%d) TX: src and dst MAC is same. Dropping packet.\n",
+                       vdev->vid);
                return -1;
        }
 
@@ -881,12 +863,11 @@ find_local_dest(struct virtio_net *dev, struct rte_mbuf *m,
         * the packet length by plus it.
         */
        *offset  = VLAN_HLEN;
-       *vlan_tag = vlan_tags[(uint16_t)dst_vdev->dev->device_fh];
+       *vlan_tag = vlan_tags[vdev->vid];
 
-       RTE_LOG(DEBUG, VHOST_DATA,
-               "(%" PRIu64 ") TX: pkt to local VM device id: (%" PRIu64 ") "
-               "vlan tag: %u.\n",
-               dev->device_fh, dst_vdev->dev->device_fh, *vlan_tag);
+       RTE_LOG_DP(DEBUG, VHOST_DATA,
+               "(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
+               vdev->vid, dst_vdev->vid, *vlan_tag);
 
        return 0;
 }
@@ -919,20 +900,50 @@ static void virtio_tx_offload(struct rte_mbuf *m)
        tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
 }
 
+static inline void
+free_pkts(struct rte_mbuf **pkts, uint16_t n)
+{
+       while (n--)
+               rte_pktmbuf_free(pkts[n]);
+}
+
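+/*
+ * Flush the lcore's pending Tx burst to the physical port; free any packets
+ * the NIC did not accept.
+ */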
+static __rte_always_inline void
+do_drain_mbuf_table(struct mbuf_table *tx_q)
+{
+       uint16_t count;
+
+       count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
+                                tx_q->m_table, tx_q->len);
+       if (unlikely(count < tx_q->len))
+               free_pkts(&tx_q->m_table[count], tx_q->len - count);
+
+       tx_q->len = 0;
+}
+
 /*
- * This function routes the TX packet to the correct interface. This may be a local device
- * or the physical port.
+ * This function routes the TX packet to the correct interface. This
+ * may be a local device or the physical port.
  */
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
 {
        struct mbuf_table *tx_q;
-       struct rte_mbuf **m_table;
-       unsigned len, ret, offset = 0;
+       unsigned offset = 0;
        const uint16_t lcore_id = rte_lcore_id();
-       struct virtio_net *dev = vdev->dev;
        struct ether_hdr *nh;
 
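+       /* Replicate broadcast frames to every other vhost device, then also
+        * send them out through the NIC (queue2nic). */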
+       nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
+       if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
+               struct vhost_dev *vdev2;
+
+               TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
+                       if (vdev2 != vdev)
+                               virtio_xmit(vdev2, vdev, m);
+               }
+               goto queue2nic;
+       }
+
        /*check if destination is local VM*/
        if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
                rte_pktmbuf_free(m);
@@ -940,18 +951,20 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
        }
 
        if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
-               if (unlikely(find_local_dest(dev, m, &offset, &vlan_tag) != 0)) {
+               if (unlikely(find_local_dest(vdev, m, &offset,
+                                            &vlan_tag) != 0)) {
                        rte_pktmbuf_free(m);
                        return;
                }
        }
 
-       RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
-               "MAC address is external\n", dev->device_fh);
+       RTE_LOG_DP(DEBUG, VHOST_DATA,
+               "(%d) TX: MAC address is external\n", vdev->vid);
+
+queue2nic:
 
        /*Add packet to the port tx queue*/
        tx_q = &lcore_tx_queue[lcore_id];
-       len = tx_q->len;
 
        nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
        if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
@@ -989,55 +1002,135 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
        if (m->ol_flags & PKT_TX_TCP_SEG)
                virtio_tx_offload(m);
 
-       tx_q->m_table[len] = m;
-       len++;
+       tx_q->m_table[tx_q->len++] = m;
        if (enable_stats) {
-               dev_statistics[dev->device_fh].tx_total++;
-               dev_statistics[dev->device_fh].tx++;
+               vdev->stats.tx_total++;
+               vdev->stats.tx++;
        }
 
-       if (unlikely(len == MAX_PKT_BURST)) {
-               m_table = (struct rte_mbuf **)tx_q->m_table;
-               ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id, m_table, (uint16_t) len);
-               /* Free any buffers not handled by TX and update the port stats. */
-               if (unlikely(ret < len)) {
-                       do {
-                               rte_pktmbuf_free(m_table[ret]);
-                       } while (++ret < len);
+       if (unlikely(tx_q->len == MAX_PKT_BURST))
+               do_drain_mbuf_table(tx_q);
+}
+
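+/*
+ * Time-based drain: flush a non-empty Tx queue once more than
+ * MBUF_TABLE_DRAIN_TSC cycles have passed since the last flush.
+ */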
+static __rte_always_inline void
+drain_mbuf_table(struct mbuf_table *tx_q)
+{
+       static uint64_t prev_tsc;
+       uint64_t cur_tsc;
+
+       if (tx_q->len == 0)
+               return;
+
+       cur_tsc = rte_rdtsc();
+       if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
+               prev_tsc = cur_tsc;
+
+               RTE_LOG_DP(DEBUG, VHOST_DATA,
+                       "TX queue drained after timeout with burst size %u\n",
+                       tx_q->len);
+               do_drain_mbuf_table(tx_q);
+       }
+}
+
+static __rte_always_inline void
+drain_eth_rx(struct vhost_dev *vdev)
+{
+       uint16_t rx_count, enqueue_count;
+       struct rte_mbuf *pkts[MAX_PKT_BURST];
+
+       rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
+                                   pkts, MAX_PKT_BURST);
+       if (!rx_count)
+               return;
+
+       /*
+        * When "enable_retry" is set, we wait and retry when there are
+        * not enough free slots in the queue to hold @rx_count packets,
+        * to diminish packet loss.
+        */
+       if (enable_retry &&
+           unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
+                       VIRTIO_RXQ))) {
+               uint32_t retry;
+
+               for (retry = 0; retry < burst_rx_retry_num; retry++) {
+                       rte_delay_us(burst_rx_delay_time);
+                       if (rx_count <= rte_vhost_avail_entries(vdev->vid,
+                                       VIRTIO_RXQ))
+                               break;
                }
+       }
 
-               len = 0;
+       if (builtin_net_driver) {
+               enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
+                                               pkts, rx_count);
+       } else {
+               enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
+                                               pkts, rx_count);
+       }
+       if (enable_stats) {
+               rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
+               rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
        }
 
-       tx_q->len = len;
-       return;
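+       /* The enqueue copied the packets into the guest ring, so the host
+        * mbufs can be freed unconditionally. */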
+       free_pkts(pkts, rx_count);
 }
+
+static __rte_always_inline void
+drain_virtio_tx(struct vhost_dev *vdev)
+{
+       struct rte_mbuf *pkts[MAX_PKT_BURST];
+       uint16_t count;
+       uint16_t i;
+
+       if (builtin_net_driver) {
+               count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
+                                       pkts, MAX_PKT_BURST);
+       } else {
+               count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
+                                       mbuf_pool, pkts, MAX_PKT_BURST);
+       }
+
+       /* setup VMDq for the first packet */
+       if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
+               if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
+                       free_pkts(pkts, count);
+       }
+
+       for (i = 0; i < count; ++i)
+               virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
+}
+
 /*
- * This function is called by each data core. It handles all RX/TX registered with the
- * core. For TX the specific lcore linked list is used. For RX, MAC addresses are compared
- * with all devices in the main linked list.
+ * Main function of vhost-switch. It basically does:
+ *
+ * for each vhost device {
+ *    - drain_eth_rx()
+ *
+ *      Which drains the host eth Rx queue linked to the vhost device,
+ *      and delivers all of them to the guest virtio Rx ring associated with
+ *      this vhost device.
+ *
+ *    - drain_virtio_tx()
+ *
+ *      Which drains the guest virtio Tx queue and delivers all of them
+ *      to the target, which could be another vhost device or the
+ *      physical eth dev. The routing is done in virtio_tx_route().
+ * }
  */
 static int
-switch_worker(__attribute__((unused)) void *arg)
+switch_worker(void *arg __rte_unused)
 {
-       struct virtio_net *dev = NULL;
-       struct vhost_dev *vdev = NULL;
-       struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+       unsigned i;
+       unsigned lcore_id = rte_lcore_id();
+       struct vhost_dev *vdev;
        struct mbuf_table *tx_q;
-       const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
-       uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
-       unsigned ret, i;
-       const uint16_t lcore_id = rte_lcore_id();
-       const uint16_t num_cores = (uint16_t)rte_lcore_count();
-       uint16_t rx_count = 0;
-       uint16_t tx_count;
-       uint32_t retry = 0;
 
        RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
-       prev_tsc = 0;
 
        tx_q = &lcore_tx_queue[lcore_id];
-       for (i = 0; i < num_cores; i ++) {
+       for (i = 0; i < rte_lcore_count(); i++) {
                if (lcore_ids[i] == lcore_id) {
                        tx_q->txq_id = i;
                        break;
@@ -1045,34 +1138,7 @@ switch_worker(__attribute__((unused)) void *arg)
        }
 
        while(1) {
-               cur_tsc = rte_rdtsc();
-               /*
-                * TX burst queue drain
-                */
-               diff_tsc = cur_tsc - prev_tsc;
-               if (unlikely(diff_tsc > drain_tsc)) {
-
-                       if (tx_q->len) {
-                               RTE_LOG(DEBUG, VHOST_DATA,
-                                       "TX queue drained after timeout with burst size %u\n",
-                                       tx_q->len);
-
-                               /*Tx any packets in the queue*/
-                               ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,
-                                                                          (struct rte_mbuf **)tx_q->m_table,
-                                                                          (uint16_t)tx_q->len);
-                               if (unlikely(ret < tx_q->len)) {
-                                       do {
-                                               rte_pktmbuf_free(tx_q->m_table[ret]);
-                                       } while (++ret < tx_q->len);
-                               }
-
-                               tx_q->len = 0;
-                       }
-
-                       prev_tsc = cur_tsc;
-
-               }
+               drain_mbuf_table(tx_q);
 
                /*
                 * Inform the configuration core that we have exited the
@@ -1082,69 +1148,21 @@ switch_worker(__attribute__((unused)) void *arg)
                        lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;
 
                /*
-                * Process devices
+                * Process vhost devices
                 */
-               TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list, next) {
-                       uint64_t fh;
-
-                       dev = vdev->dev;
-                       fh  = dev->device_fh;
-
+               TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
+                             lcore_vdev_entry) {
                        if (unlikely(vdev->remove)) {
                                unlink_vmdq(vdev);
                                vdev->ready = DEVICE_SAFE_REMOVE;
                                continue;
                        }
 
-                       if (likely(vdev->ready == DEVICE_RX)) {
-                               /*Handle guest RX*/
-                               rx_count = rte_eth_rx_burst(ports[0],
-                                       vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
-
-                               if (rx_count) {
-                                       /*
-                                       * Retry is enabled and the queue is full then we wait and retry to avoid packet loss
-                                       * Here MAX_PKT_BURST must be less than virtio queue size
-                                       */
-                                       if (enable_retry && unlikely(rx_count > rte_vring_available_entries(dev, VIRTIO_RXQ))) {
-                                               for (retry = 0; retry < burst_rx_retry_num; retry++) {
-                                                       rte_delay_us(burst_rx_delay_time);
-                                                       if (rx_count <= rte_vring_available_entries(dev, VIRTIO_RXQ))
-                                                               break;
-                                               }
-                                       }
-                                       ret_count = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_burst, rx_count);
-                                       if (enable_stats) {
-                                               rte_atomic64_add(
-                                                       &dev_statistics[fh].rx_total_atomic,
-                                                       rx_count);
-                                               rte_atomic64_add(
-                                                       &dev_statistics[fh].rx_atomic,
-                                                       ret_count);
-                                       }
-                                       while (likely(rx_count)) {
-                                               rx_count--;
-                                               rte_pktmbuf_free(pkts_burst[rx_count]);
-                                       }
+                       if (likely(vdev->ready == DEVICE_RX))
+                               drain_eth_rx(vdev);
 
-                               }
-                       }
-
-                       if (likely(!vdev->remove)) {
-                               /* Handle guest TX*/
-                               tx_count = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ, mbuf_pool, pkts_burst, MAX_PKT_BURST);
-                               /* If this is the first received packet we need to learn the MAC and setup VMDQ */
-                               if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) {
-                                       if (vdev->remove || (link_vmdq(vdev, pkts_burst[0]) == -1)) {
-                                               while (tx_count)
-                                                       rte_pktmbuf_free(pkts_burst[--tx_count]);
-                                       }
-                               }
-                               for (i = 0; i < tx_count; ++i) {
-                                       virtio_tx_route(vdev, pkts_burst[i],
-                                               vlan_tags[(uint16_t)dev->device_fh]);
-                               }
-                       }
+                       if (likely(!vdev->remove))
+                               drain_virtio_tx(vdev);
                }
        }
 
@@ -1158,22 +1176,30 @@ switch_worker(__attribute__((unused)) void *arg)
  * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
  */
 static void
-destroy_device (volatile struct virtio_net *dev)
+destroy_device(int vid)
 {
-       struct vhost_dev *vdev;
+       struct vhost_dev *vdev = NULL;
        int lcore;
 
-       dev->flags &= ~VIRTIO_DEV_RUNNING;
-
-       vdev = (struct vhost_dev *)dev->priv;
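+       /* The callback now receives only a vid; look up the matching vhost_dev. */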
+       TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+               if (vdev->vid == vid)
+                       break;
+       }
+       if (!vdev)
+               return;
        /*set the remove flag. */
        vdev->remove = 1;
        while(vdev->ready != DEVICE_SAFE_REMOVE) {
                rte_pause();
        }
 
-       TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev, next);
-       TAILQ_REMOVE(&vhost_dev_list, vdev, next);
+       if (builtin_net_driver)
+               vs_vhost_net_remove(vdev);
+
+       TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
+                    lcore_vdev_entry);
+       TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
 
        /* Set the dev_removal_flag on each lcore. */
        RTE_LCORE_FOREACH_SLAVE(lcore)
@@ -1192,8 +1218,8 @@ destroy_device (volatile struct virtio_net *dev)
        lcore_info[vdev->coreid].device_num--;
 
        RTE_LOG(INFO, VHOST_DATA,
-               "(%" PRIu64 ") Device has been removed from data core\n",
-               dev->device_fh);
+               "(%d) device has been removed from data core\n",
+               vdev->vid);
 
        rte_free(vdev);
 }
@@ -1203,7 +1229,7 @@ destroy_device (volatile struct virtio_net *dev)
  * and the allocated to a specific data core.
  */
 static int
-new_device (struct virtio_net *dev)
+new_device(int vid)
 {
        int lcore, core_add = 0;
        uint32_t device_num_min = num_devices;
@@ -1211,16 +1237,18 @@ new_device (struct virtio_net *dev)
 
        vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
        if (vdev == NULL) {
-               RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Couldn't allocate memory for vhost dev\n",
-                       dev->device_fh);
+               RTE_LOG(INFO, VHOST_DATA,
+                       "(%d) couldn't allocate memory for vhost dev\n",
+                       vid);
                return -1;
        }
-       vdev->dev = dev;
-       dev->priv = vdev;
+       vdev->vid = vid;
+
+       if (builtin_net_driver)
+               vs_vhost_net_setup(vdev);
 
-       TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, next);
-       vdev->vmdq_rx_q
-               = dev->device_fh * queues_per_pool + vmdq_queue_base;
+       TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
+       vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
 
        /*reset ready flag*/
        vdev->ready = DEVICE_MAC_LEARNING;
@@ -1235,18 +1263,17 @@ new_device (struct virtio_net *dev)
        }
        vdev->coreid = core_add;
 
-       TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev, next);
+       TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
+                         lcore_vdev_entry);
        lcore_info[vdev->coreid].device_num++;
 
-       /* Initialize device stats */
-       memset(&dev_statistics[dev->device_fh], 0, sizeof(struct device_statistics));
-
        /* Disable notifications. */
-       rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
-       rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);
-       dev->flags |= VIRTIO_DEV_RUNNING;
+       rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
+       rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
 
-       RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d\n", dev->device_fh, vdev->coreid);
+       RTE_LOG(INFO, VHOST_DATA,
+               "(%d) device has been added to data core %d\n",
+               vid, vdev->coreid);
 
        return 0;
 }
@@ -1255,7 +1282,7 @@ new_device (struct virtio_net *dev)
  * These callbacks allow devices to be added to the data core when configuration
  * has been fully completed.
  */
-static const struct virtio_net_device_ops virtio_net_device_ops =
+static const struct vhost_device_ops virtio_net_device_ops =
 {
        .new_device =  new_device,
        .destroy_device = destroy_device,
@@ -1265,13 +1292,12 @@ static const struct virtio_net_device_ops virtio_net_device_ops =
  * This is a thread that will wake up after a period to print stats if the
  * user has enabled them.
  */
-static void
-print_stats(void)
+static void *
+print_stats(__rte_unused void *arg)
 {
        struct vhost_dev *vdev;
        uint64_t tx_dropped, rx_dropped;
        uint64_t tx, tx_total, rx, rx_total;
-       uint32_t device_fh;
        const char clr[] = { 27, '[', '2', 'J', '\0' };
        const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
 
@@ -1279,37 +1305,48 @@ print_stats(void)
                sleep(enable_stats);
 
                /* Clear screen and move to top left */
-               printf("%s%s", clr, top_left);
-
-               printf("\nDevice statistics ====================================");
+               printf("%s%s\n", clr, top_left);
+               printf("Device statistics =================================\n");
 
-               TAILQ_FOREACH(vdev, &vhost_dev_list, next) {
-                       device_fh = vdev->dev->device_fh;
-                       tx_total = dev_statistics[device_fh].tx_total;
-                       tx = dev_statistics[device_fh].tx;
+               TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+                       tx_total   = vdev->stats.tx_total;
+                       tx         = vdev->stats.tx;
                        tx_dropped = tx_total - tx;
-                       rx_total = rte_atomic64_read(
-                               &dev_statistics[device_fh].rx_total_atomic);
-                       rx = rte_atomic64_read(
-                               &dev_statistics[device_fh].rx_atomic);
+
+                       rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
+                       rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
                        rx_dropped = rx_total - rx;
 
-                       printf("\nStatistics for device %"PRIu32" ------------------------------"
-                                       "\nTX total:            %"PRIu64""
-                                       "\nTX dropped:          %"PRIu64""
-                                       "\nTX successful:               %"PRIu64""
-                                       "\nRX total:            %"PRIu64""
-                                       "\nRX dropped:          %"PRIu64""
-                                       "\nRX successful:               %"PRIu64"",
-                                       device_fh,
-                                       tx_total,
-                                       tx_dropped,
-                                       tx,
-                                       rx_total,
-                                       rx_dropped,
-                                       rx);
+                       printf("Statistics for device %d\n"
+                               "-----------------------\n"
+                               "TX total:              %" PRIu64 "\n"
+                               "TX dropped:            %" PRIu64 "\n"
+                               "TX successful:         %" PRIu64 "\n"
+                               "RX total:              %" PRIu64 "\n"
+                               "RX dropped:            %" PRIu64 "\n"
+                               "RX successful:         %" PRIu64 "\n",
+                               vdev->vid,
+                               tx_total, tx_dropped, tx,
+                               rx_total, rx_dropped, rx);
                }
-               printf("\n======================================================\n");
+
+               printf("===================================================\n");
+       }
+
+       return NULL;
+}
+
+static void
+unregister_drivers(int socket_num)
+{
+       int i, ret;
+
+       for (i = 0; i < socket_num; i++) {
+               ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
+               if (ret != 0)
+                       RTE_LOG(ERR, VHOST_CONFIG,
+                               "Failed to unregister vhost driver for %s.\n",
+                               socket_files + i * PATH_MAX);
        }
 }
 
@@ -1318,25 +1355,74 @@ static void
 sigint_handler(__rte_unused int signum)
 {
        /* Unregister vhost driver. */
-       int ret = rte_vhost_driver_unregister((char *)&dev_basename);
-       if (ret != 0)
-               rte_exit(EXIT_FAILURE, "vhost driver unregister failure.\n");
+       unregister_drivers(nb_sockets);
+
        exit(0);
 }
 
 /*
- * Main function, does initialisation and calls the per-lcore functions. The CUSE
- * device is also registered here to handle the IOCTLs.
+ * While creating an mbuf pool, one key thing is to figure out how
+ * many mbuf entries are enough for our use. FYI, here are some
+ * guidelines:
+ *
+ * - Each Rx queue would reserve @nr_rx_desc mbufs at queue setup stage
+ *
+ * - For each switch core (a CPU core that does the packet switching),
+ *   we also need to make some reservation for receiving the packets
+ *   from the virtio Tx queue. How many are enough depends on the usage.
+ *   It's normally a simple calculation like the following:
+ *
+ *       MAX_PKT_BURST * max packet size / mbuf size
+ *
+ *   So, we definitely need to allocate more mbufs when TSO is enabled.
+ *
+ * - Similarly, for each switching core, we should reserve @nr_rx_desc
+ *   mbufs for receiving the packets from the physical NIC device.
+ *
+ * - We also need to make sure that, for each switch core, we have
+ *   allocated enough mbufs to fill up the mbuf cache.
+ */
+static void
+create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
+       uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
+{
+       uint32_t nr_mbufs;
+       uint32_t nr_mbufs_per_core;
+       uint32_t mtu = 1500;
+
+       if (mergeable)
+               mtu = 9000;
+       if (enable_tso)
+               mtu = 64 * 1024;
+
+       nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
+                       (mbuf_size - RTE_PKTMBUF_HEADROOM);
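+       /* e.g. mtu 1500 with 2176-byte mbufs: (1500 + 2176) * 32 / 2048 = ~57 */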
+       nr_mbufs_per_core += nr_rx_desc;
+       nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
+
+       nr_mbufs  = nr_queues * nr_rx_desc;
+       nr_mbufs += nr_mbufs_per_core * nr_switch_core;
+       nr_mbufs *= nr_port;
+
+       mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
+                                           nr_mbuf_cache, 0, mbuf_size,
+                                           rte_socket_id());
+       if (mbuf_pool == NULL)
+               rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
+}
+
+/*
+ * Main function, does initialisation and calls the per-lcore functions.
  */
 int
 main(int argc, char *argv[])
 {
        unsigned lcore_id, core_id = 0;
        unsigned nb_ports, valid_num_ports;
-       int ret;
-       uint8_t portid;
+       int ret, i;
+       uint16_t portid;
        static pthread_t tid;
-       char thread_name[RTE_MAX_THREAD_NAME_LEN];
+       uint64_t flags = 0;
 
        signal(SIGINT, sigint_handler);
 
@@ -1352,22 +1438,18 @@ main(int argc, char *argv[])
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Invalid argument\n");
 
-       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++)
+       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
                TAILQ_INIT(&lcore_info[lcore_id].vdev_list);
 
                if (rte_lcore_is_enabled(lcore_id))
-                       lcore_ids[core_id ++] = lcore_id;
+                       lcore_ids[core_id++] = lcore_id;
+       }
 
        if (rte_lcore_count() > RTE_MAX_LCORE)
                rte_exit(EXIT_FAILURE,"Not enough cores\n");
 
-       /*set the number of swithcing cores available*/
-       num_switching_cores = rte_lcore_count()-1;
-
        /* Get the number of physical ports. */
-       nb_ports = rte_eth_dev_count();
-       if (nb_ports > RTE_MAX_ETHPORTS)
-               nb_ports = RTE_MAX_ETHPORTS;
+       nb_ports = rte_eth_dev_count_avail();
 
        /*
         * Update the global var NUM_PORTS and global array PORTS
@@ -1381,12 +1463,14 @@ main(int argc, char *argv[])
                return -1;
        }
 
-       /* Create the mbuf pool. */
-       mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
-               NUM_MBUFS_PER_PORT * valid_num_ports, MBUF_CACHE_SIZE,
-               0, MBUF_DATA_SIZE, rte_socket_id());
-       if (mbuf_pool == NULL)
-               rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
+       /*
+        * FIXME: here we are trying to allocate mbufs big enough for
+        * @MAX_QUEUES, but the truth is we're never going to use that
+        * many queues here. We probably should only do allocation for
+        * those queues we are going to use.
+        */
+       create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
+                        MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
 
        if (vm2vm_mode == VM2VM_HARDWARE) {
                /* Enable VT loop back to let L2 switch to do it. */
@@ -1396,7 +1480,7 @@ main(int argc, char *argv[])
        }
 
        /* initialize all ports */
-       for (portid = 0; portid < nb_ports; portid++) {
+       RTE_ETH_FOREACH_DEV(portid) {
                /* skip ports that are not enabled */
                if ((enabled_port_mask & (1 << portid)) == 0) {
                        RTE_LOG(INFO, VHOST_PORT,
@@ -1408,40 +1492,80 @@ main(int argc, char *argv[])
                                "Cannot initialize network ports\n");
        }
 
-       /* Initialize device stats */
-       memset(&dev_statistics, 0, sizeof(dev_statistics));
-
        /* Enable stats if the user option is set. */
        if (enable_stats) {
-               ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
-               if (ret != 0)
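+               /*
+                * rte_ctrl_thread_create() spawns the thread, names it and
+                * keeps its affinity off the data-plane cores in one call,
+                * replacing the pthread_create + rte_thread_setname pair.
+                */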
+               ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
+                                       print_stats, NULL);
+               if (ret < 0)
                        rte_exit(EXIT_FAILURE,
                                "Cannot create print-stats thread\n");
-
-               /* Set thread_name for aid in debugging.  */
-               snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
-               ret = rte_thread_setname(tid, thread_name);
-               if (ret != 0)
-                       RTE_LOG(ERR, VHOST_CONFIG,
-                               "Cannot set print-stats name\n");
        }
 
        /* Launch all data cores. */
        RTE_LCORE_FOREACH_SLAVE(lcore_id)
                rte_eal_remote_launch(switch_worker, NULL, lcore_id);
 
-       if (mergeable == 0)
-               rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
+       if (client_mode)
+               flags |= RTE_VHOST_USER_CLIENT;
+
+       if (dequeue_zero_copy)
+               flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
+
+       /* Register vhost user driver to handle vhost messages. */
+       for (i = 0; i < nb_sockets; i++) {
+               char *file = socket_files + i * PATH_MAX;
+               ret = rte_vhost_driver_register(file, flags);
+               if (ret != 0) {
+                       unregister_drivers(i);
+                       rte_exit(EXIT_FAILURE,
+                               "vhost driver register failure.\n");
+               }
+
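+               /* Apply per-socket feature tweaks before starting the
+                * socket, so they take effect from the first connection. */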
+               if (builtin_net_driver)
+                       rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);
+
+               if (mergeable == 0) {
+                       rte_vhost_driver_disable_features(file,
+                               1ULL << VIRTIO_NET_F_MRG_RXBUF);
+               }
+
+               if (enable_tx_csum == 0) {
+                       rte_vhost_driver_disable_features(file,
+                               1ULL << VIRTIO_NET_F_CSUM);
+               }
 
-       /* Register vhost(cuse or user) driver to handle vhost messages. */
-       ret = rte_vhost_driver_register((char *)&dev_basename);
-       if (ret != 0)
-               rte_exit(EXIT_FAILURE, "vhost driver register failure.\n");
+               if (enable_tso == 0) {
+                       rte_vhost_driver_disable_features(file,
+                               1ULL << VIRTIO_NET_F_HOST_TSO4);
+                       rte_vhost_driver_disable_features(file,
+                               1ULL << VIRTIO_NET_F_HOST_TSO6);
+                       rte_vhost_driver_disable_features(file,
+                               1ULL << VIRTIO_NET_F_GUEST_TSO4);
+                       rte_vhost_driver_disable_features(file,
+                               1ULL << VIRTIO_NET_F_GUEST_TSO6);
+               }
 
-       rte_vhost_driver_callback_register(&virtio_net_device_ops);
+               if (promiscuous) {
+                       rte_vhost_driver_enable_features(file,
+                               1ULL << VIRTIO_NET_F_CTRL_RX);
+               }
+
+               ret = rte_vhost_driver_callback_register(file,
+                       &virtio_net_device_ops);
+               if (ret != 0) {
+                       rte_exit(EXIT_FAILURE,
+                               "failed to register vhost driver callbacks.\n");
+               }
+
+               if (rte_vhost_driver_start(file) < 0) {
+                       rte_exit(EXIT_FAILURE,
+                               "failed to start vhost driver.\n");
+               }
+       }
+
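+       /*
+        * rte_vhost_driver_start() is non-blocking (unlike the removed
+        * rte_vhost_driver_session_start()), so simply wait on the worker
+        * lcores here.
+        */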
+       RTE_LCORE_FOREACH_SLAVE(lcore_id)
+               rte_eal_wait_lcore(lcore_id);
 
-       /* Start CUSE session. */
-       rte_vhost_driver_session_start();
        return 0;
 
 }