raw/ifpga: remove virtual devices on close
[dpdk.git] / examples/vhost/main.c
index 970fbfb..e7fee5a 100644 (file)
 #include <sys/param.h>
 #include <unistd.h>
 
-#include <rte_atomic.h>
 #include <rte_cycles.h>
 #include <rte_ethdev.h>
 #include <rte_log.h>
 #include <rte_string_fns.h>
 #include <rte_malloc.h>
+#include <rte_net.h>
 #include <rte_vhost.h>
 #include <rte_ip.h>
 #include <rte_tcp.h>
 #include <rte_pause.h>
+#include <rte_dmadev.h>
+#include <rte_vhost_async.h>
 
 #include "main.h"
 
@@ -31,6 +33,8 @@
 #define MAX_QUEUES 128
 #endif
 
+#define NUM_MBUFS_DEFAULT 0x24000
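(0x24000 mbufs is 147456 in decimal, matching the default quoted later in the --total-num-mbufs help text.)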
+
 /* the maximum number of external ports supported */
 #define MAX_SUP_PORTS 1
 
@@ -43,6 +47,7 @@
 #define BURST_RX_RETRIES 4             /* Number of retries on RX. */
 
 #define JUMBO_FRAME_MAX_SIZE    0x2600
+#define MAX_MTU (JUMBO_FRAME_MAX_SIZE - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN))
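(With JUMBO_FRAME_MAX_SIZE at 0x2600 = 9728 bytes, and RTE_ETHER_HDR_LEN (14) plus RTE_ETHER_CRC_LEN (4) subtracted, MAX_MTU works out to 9710 bytes.)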
 
 /* State of virtio device. */
 #define DEVICE_MAC_LEARNING 0
 #define RTE_TEST_TX_DESC_DEFAULT 512
 
 #define INVALID_PORT_ID 0xFF
+#define INVALID_DMA_ID -1
+
+#define DMA_RING_SIZE 4096
 
-/* Max number of devices. Limited by vmdq. */
-#define MAX_DEVICES 64
+#define ASYNC_ENQUEUE_VHOST 1
+#define ASYNC_DEQUEUE_VHOST 2
 
-/* Size of buffers used for snprintfs. */
-#define MAX_PRINT_BUFF 6072
+/* number of mbufs in all pools; can be overridden on the command line. */
+static int total_num_mbufs = NUM_MBUFS_DEFAULT;
 
-/* Maximum long option length for option parsing. */
-#define MAX_LONG_OPT_SZ 64
+struct dma_for_vhost dma_bind[RTE_MAX_VHOST_DEVICE];
+int16_t dmas_id[RTE_DMADEV_DEFAULT_MAX];
+static int dma_count;
 
 /* mask of enabled ports */
 static uint32_t enabled_port_mask = 0;
@@ -98,7 +107,6 @@ static uint32_t enable_tx_csum;
 static uint32_t enable_tso;
 
 static int client_mode;
-static int dequeue_zero_copy;
 
 static int builtin_net_driver;
 
@@ -111,26 +119,28 @@ static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
 static char *socket_files;
 static int nb_sockets;
 
-/* empty vmdq configuration structure. Filled in programatically */
+static struct vhost_queue_ops vdev_queue_ops[RTE_MAX_VHOST_DEVICE];
+
+/* empty VMDq configuration structure. Filled in programmatically */
 static struct rte_eth_conf vmdq_conf_default = {
        .rxmode = {
-               .mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
+               .mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,
                .split_hdr_size = 0,
-               .header_split   = 0, /**< Header Split disabled */
-               .hw_ip_checksum = 0, /**< IP checksum offload disabled */
-               .hw_vlan_filter = 0, /**< VLAN filtering disabled */
                /*
-                * It is necessary for 1G NIC such as I350,
+                * VLAN strip is necessary for 1G NIC such as I350,
                 * this fixes bug of ipv4 forwarding in guest can't
-                * forward pakets from one virtio dev to another virtio dev.
+                * forward packets from one virtio dev to another virtio dev.
                 */
-               .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
-               .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
-               .hw_strip_crc   = 1, /**< CRC stripped by hardware */
+               .offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
        },
 
        .txmode = {
-               .mq_mode = ETH_MQ_TX_NONE,
+               .mq_mode = RTE_ETH_MQ_TX_NONE,
+               .offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+                            RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+                            RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+                            RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+                            RTE_ETH_TX_OFFLOAD_TCP_TSO),
        },
        .rx_adv_conf = {
                /*
@@ -138,7 +148,7 @@ static struct rte_eth_conf vmdq_conf_default = {
                 * appropriate values
                 */
                .vmdq_rx_conf = {
-                       .nb_queue_pools = ETH_8_POOLS,
+                       .nb_queue_pools = RTE_ETH_8_POOLS,
                        .enable_default_pool = 0,
                        .default_pool = 0,
                        .nb_pool_maps = 0,
@@ -147,6 +157,7 @@ static struct rte_eth_conf vmdq_conf_default = {
        },
 };
 
+
 static unsigned lcore_ids[RTE_MAX_LCORE];
 static uint16_t ports[RTE_MAX_ETHPORTS];
 static unsigned num_ports = 0; /**< The number of ports specified in command line */
@@ -166,7 +177,7 @@ const uint16_t vlan_tags[] = {
 };
 
 /* ethernet addresses of ports */
-static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
+static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
 
 static struct vhost_dev_tailq_list vhost_dev_list =
        TAILQ_HEAD_INITIALIZER(vhost_dev_list);
@@ -180,12 +191,191 @@ struct mbuf_table {
        struct rte_mbuf *m_table[MAX_PKT_BURST];
 };
 
+struct vhost_bufftable {
+       uint32_t len;
+       uint64_t pre_tsc;
+       struct rte_mbuf *m_table[MAX_PKT_BURST];
+};
+
 /* TX queue for each data core. */
 struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
 
+/*
+ * Vhost TX buffer for each data core.
+ * Every data core maintains a TX buffer for every vhost device,
+ * which is used to batch packet enqueues for higher performance.
+ */
+struct vhost_bufftable *vhost_txbuff[RTE_MAX_LCORE * RTE_MAX_VHOST_DEVICE];
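Each (lcore, vhost device) pair owns exactly one of these buffers, looked up as vhost_txbuff[lcore_id * RTE_MAX_VHOST_DEVICE + vid] (see drain_vhost() and virtio_tx_local() further down), so data cores never contend for a TX buffer.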
+
 #define MBUF_TABLE_DRAIN_TSC   ((rte_get_tsc_hz() + US_PER_S - 1) \
                                 / US_PER_S * BURST_TX_DRAIN_US)
-#define VLAN_HLEN       4
+
+static int vid2socketid[RTE_MAX_VHOST_DEVICE];
+
+static inline uint32_t
+get_async_flag_by_socketid(int socketid)
+{
+       return dma_bind[socketid].async_flag;
+}
+
+static inline void
+init_vid2socketid_array(int vid, int socketid)
+{
+       vid2socketid[vid] = socketid;
+}
+
+static inline bool
+is_dma_configured(int16_t dev_id)
+{
+       int i;
+
+       for (i = 0; i < dma_count; i++)
+               if (dmas_id[i] == dev_id)
+                       return true;
+       return false;
+}
+
+static inline int
+open_dma(const char *value)
+{
+       struct dma_for_vhost *dma_info = dma_bind;
+       char *input = strndup(value, strlen(value) + 1);
+       char *addrs = input;
+       char *ptrs[2];
+       char *start, *end, *substr;
+       int64_t socketid, vring_id;
+
+       struct rte_dma_info info;
+       struct rte_dma_conf dev_config = { .nb_vchans = 1 };
+       struct rte_dma_vchan_conf qconf = {
+               .direction = RTE_DMA_DIR_MEM_TO_MEM,
+               .nb_desc = DMA_RING_SIZE
+       };
+
+       int dev_id;
+       int ret = 0;
+       uint16_t i = 0;
+       char *dma_arg[RTE_MAX_VHOST_DEVICE];
+       int args_nr;
+
+       while (isblank(*addrs))
+               addrs++;
+       if (*addrs == '\0') {
+               ret = -1;
+               goto out;
+       }
+
+       /* process the DMA devices listed within the brackets. */
+       addrs++;
+       substr = strtok(addrs, ";]");
+       if (!substr) {
+               ret = -1;
+               goto out;
+       }
+
+       args_nr = rte_strsplit(substr, strlen(substr), dma_arg, RTE_MAX_VHOST_DEVICE, ',');
+       if (args_nr <= 0) {
+               ret = -1;
+               goto out;
+       }
+
+       while (i < args_nr) {
+               char *arg_temp = dma_arg[i];
+               char *txd, *rxd;
+               uint8_t sub_nr;
+               int async_flag;
+
+               sub_nr = rte_strsplit(arg_temp, strlen(arg_temp), ptrs, 2, '@');
+               if (sub_nr != 2) {
+                       ret = -1;
+                       goto out;
+               }
+
+               txd = strstr(ptrs[0], "txd");
+               rxd = strstr(ptrs[0], "rxd");
+               if (txd) {
+                       start = txd;
+                       vring_id = VIRTIO_RXQ;
+                       async_flag = ASYNC_ENQUEUE_VHOST;
+               } else if (rxd) {
+                       start = rxd;
+                       vring_id = VIRTIO_TXQ;
+                       async_flag = ASYNC_DEQUEUE_VHOST;
+               } else {
+                       ret = -1;
+                       goto out;
+               }
+
+               start += 3;
+               socketid = strtol(start, &end, 0);
+               if (end == start) {
+                       ret = -1;
+                       goto out;
+               }
+
+               dev_id = rte_dma_get_dev_id_by_name(ptrs[1]);
+               if (dev_id < 0) {
+                       RTE_LOG(ERR, VHOST_CONFIG, "Fail to find DMA %s.\n", ptrs[1]);
+                       ret = -1;
+                       goto out;
+               }
+
+               /* DMA device is already configured, so skip */
+               if (is_dma_configured(dev_id))
+                       goto done;
+
+               if (rte_dma_info_get(dev_id, &info) != 0) {
+                       RTE_LOG(ERR, VHOST_CONFIG, "Error with rte_dma_info_get()\n");
+                       ret = -1;
+                       goto out;
+               }
+
+               if (info.max_vchans < 1) {
+                       RTE_LOG(ERR, VHOST_CONFIG, "No channels available on device %d\n", dev_id);
+                       ret = -1;
+                       goto out;
+               }
+
+               if (rte_dma_configure(dev_id, &dev_config) != 0) {
+                       RTE_LOG(ERR, VHOST_CONFIG, "Fail to configure DMA %d.\n", dev_id);
+                       ret = -1;
+                       goto out;
+               }
+
+               /* Check the max desc supported by DMA device */
+               rte_dma_info_get(dev_id, &info);
+               if (info.nb_vchans != 1) {
+                       RTE_LOG(ERR, VHOST_CONFIG, "No configured queues reported by DMA %d.\n",
+                                       dev_id);
+                       ret = -1;
+                       goto out;
+               }
+
+               qconf.nb_desc = RTE_MIN(DMA_RING_SIZE, info.max_desc);
+
+               if (rte_dma_vchan_setup(dev_id, 0, &qconf) != 0) {
+                       RTE_LOG(ERR, VHOST_CONFIG, "Fail to set up DMA %d.\n", dev_id);
+                       ret = -1;
+                       goto out;
+               }
+
+               if (rte_dma_start(dev_id) != 0) {
+                       RTE_LOG(ERR, VHOST_CONFIG, "Fail to start DMA %u.\n", dev_id);
+                       ret = -1;
+                       goto out;
+               }
+
+               dmas_id[dma_count++] = dev_id;
+
+done:
+               (dma_info + socketid)->dmas[vring_id].dev_id = dev_id;
+               (dma_info + socketid)->async_flag |= async_flag;
+               i++;
+       }
+out:
+       free(input);
+       return ret;
+}
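As a hedged illustration of the string open_dma() parses (the PCI-style device names below are assumptions; any name resolvable by rte_dma_get_dev_id_by_name() works), a minimal sketch of a direct call and the bindings it would produce:

    /*
     * Illustrative only, not part of the patch: "txdN" binds a DMA channel to
     * the enqueue (VIRTIO_RXQ) path of socket N, "rxdN" to its dequeue
     * (VIRTIO_TXQ) path; entries are comma-separated inside the brackets.
     */
    int ret = open_dma("[txd0@0000:00:04.0,rxd0@0000:00:04.1,txd1@0000:00:04.2]");
    /*
     * On success, dma_bind[0].dmas[VIRTIO_RXQ].dev_id and
     * dma_bind[0].dmas[VIRTIO_TXQ].dev_id hold the first two DMA device ids,
     * socket 1 gets an enqueue-only binding, and each dma_bind[N].async_flag
     * records which directions are asynchronous.
     */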
 
 /*
  * Builds up the correct configuration for VMDQ VLAN pool map
@@ -216,21 +406,6 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
        return 0;
 }
 
-/*
- * Validate the device number according to the max pool number gotten form
- * dev_info. If the device number is invalid, give the error message and
- * return -1. Each device must have its own pool.
- */
-static inline int
-validate_num_devices(uint32_t max_nb_devices)
-{
-       if (num_devices > max_nb_devices) {
-               RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
-               return -1;
-       }
-       return 0;
-}
-
 /*
  * Initialises a given port using global settings and with the rx buffers
  * coming from the mbuf_pool passed as parameter
@@ -248,42 +423,37 @@ port_init(uint16_t port)
        uint16_t q;
 
        /* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
-       rte_eth_dev_info_get (port, &dev_info);
+       retval = rte_eth_dev_info_get(port, &dev_info);
+       if (retval != 0) {
+               RTE_LOG(ERR, VHOST_PORT,
+                       "Error during getting device (port %u) info: %s\n",
+                       port, strerror(-retval));
 
-       if (dev_info.max_rx_queues > MAX_QUEUES) {
-               rte_exit(EXIT_FAILURE,
-                       "please define MAX_QUEUES no less than %u in %s\n",
-                       dev_info.max_rx_queues, __FILE__);
+               return retval;
+       }
+       if (dev_info.max_vmdq_pools == 0) {
+               RTE_LOG(ERR, VHOST_PORT, "Failed to get VMDq info.\n");
+               return -1;
        }
 
        rxconf = &dev_info.default_rxconf;
        txconf = &dev_info.default_txconf;
        rxconf->rx_drop_en = 1;
 
-       /* Enable vlan offload */
-       txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;
-
        /*configure the number of supported virtio devices based on VMDQ limits */
        num_devices = dev_info.max_vmdq_pools;
 
        rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
        tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
 
-       /*
-        * When dequeue zero copy is enabled, guest Tx used vring will be
-        * updated only when corresponding mbuf is freed. Thus, the nb_tx_desc
-        * (tx_ring_size here) must be small enough so that the driver will
-        * hit the free threshold easily and free mbufs timely. Otherwise,
-        * guest Tx vring would be starved.
-        */
-       if (dequeue_zero_copy)
-               tx_ring_size = 64;
-
        tx_rings = (uint16_t)rte_lcore_count();
 
-       retval = validate_num_devices(MAX_DEVICES);
-       if (retval < 0)
-               return retval;
+       if (mergeable) {
+               if (dev_info.max_mtu != UINT16_MAX && dev_info.max_rx_pktlen > dev_info.max_mtu)
+                       vmdq_conf_default.rxmode.mtu = dev_info.max_mtu;
+               else
+                       vmdq_conf_default.rxmode.mtu = MAX_MTU;
+       }
 
        /* Get port configuration. */
        retval = get_eth_conf(&port_conf, num_devices);
@@ -299,9 +469,13 @@ port_init(uint16_t port)
        printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
                num_pf_queues, num_devices, queues_per_pool);
 
-       if (port >= rte_eth_dev_count()) return -1;
+       if (!rte_eth_dev_is_valid_port(port))
+               return -1;
 
        rx_rings = (uint16_t)dev_info.max_rx_queues;
+       if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+               port_conf.txmode.offloads |=
+                       RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
        /* Configure ethernet device. */
        retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
        if (retval != 0) {
@@ -324,6 +498,7 @@ port_init(uint16_t port)
        }
 
        /* Setup the queues. */
+       rxconf->offloads = port_conf.rxmode.offloads;
        for (q = 0; q < rx_rings; q ++) {
                retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
                                                rte_eth_dev_socket_id(port),
@@ -336,6 +511,7 @@ port_init(uint16_t port)
                        return retval;
                }
        }
+       txconf->offloads = port_conf.txmode.offloads;
        for (q = 0; q < tx_rings; q ++) {
                retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
                                                rte_eth_dev_socket_id(port),
@@ -356,20 +532,28 @@ port_init(uint16_t port)
                return retval;
        }
 
-       if (promiscuous)
-               rte_eth_promiscuous_enable(port);
+       if (promiscuous) {
+               retval = rte_eth_promiscuous_enable(port);
+               if (retval != 0) {
+                       RTE_LOG(ERR, VHOST_PORT,
+                               "Failed to enable promiscuous mode on port %u: %s\n",
+                               port, rte_strerror(-retval));
+                       return retval;
+               }
+       }
+
+       retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
+       if (retval < 0) {
+               RTE_LOG(ERR, VHOST_PORT,
+                       "Failed to get MAC address on port %u: %s\n",
+                       port, rte_strerror(-retval));
+               return retval;
+       }
 
-       rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
        RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
        RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
-                       " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
-                       port,
-                       vmdq_ports_eth_addr[port].addr_bytes[0],
-                       vmdq_ports_eth_addr[port].addr_bytes[1],
-                       vmdq_ports_eth_addr[port].addr_bytes[2],
-                       vmdq_ports_eth_addr[port].addr_bytes[3],
-                       vmdq_ports_eth_addr[port].addr_bytes[4],
-                       vmdq_ports_eth_addr[port].addr_bytes[5]);
+               " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
+               port, RTE_ETHER_ADDR_BYTES(&vmdq_ports_eth_addr[port]));
 
        return 0;
 }
@@ -380,12 +564,20 @@ port_init(uint16_t port)
 static int
 us_vhost_parse_socket_path(const char *q_arg)
 {
+       char *old;
+
        /* parse number string */
        if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
                return -1;
 
+       old = socket_files;
        socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
-       snprintf(socket_files + nb_sockets * PATH_MAX, PATH_MAX, "%s", q_arg);
+       if (socket_files == NULL) {
+               free(old);
+               return -1;
+       }
+
+       strlcpy(socket_files + nb_sockets * PATH_MAX, q_arg, PATH_MAX);
        nb_sockets++;
 
        return 0;
@@ -405,10 +597,7 @@ parse_portmask(const char *portmask)
        /* parse hexadecimal string */
        pm = strtoul(portmask, &end, 16);
        if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
-               return -1;
-
-       if (pm == 0)
-               return -1;
+               return 0;
 
        return pm;
 
@@ -450,7 +639,7 @@ us_vhost_usage(const char *prgname)
        "               --nb-devices ND\n"
        "               -p PORTMASK: Set mask for ports to be used by application\n"
        "               --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
-       "               --rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destintation queue is full\n"
+       "               --rx-retry [0|1]: disable/enable(default) retries on Rx. Enable retry if destination queue is full\n"
        "               --rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\n"
        "               --rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\n"
        "               --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
@@ -459,10 +648,40 @@ us_vhost_usage(const char *prgname)
        "               --tx-csum [0|1] disable/enable TX checksum offload.\n"
        "               --tso [0|1] disable/enable TCP segment offload.\n"
        "               --client register a vhost-user socket as client mode.\n"
-       "               --dequeue-zero-copy enables dequeue zero copy\n",
+       "               --dmas register dma channel for specific vhost device.\n"
+       "               --total-num-mbufs [0-N] set the number of mbufs to be allocated in mbuf pools, the default value is 147456.\n",
               prgname);
 }
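As a hedged usage illustration (binary name, core list, PCI addresses and socket path are assumptions, not taken from this patch), these options combine on a command line along the lines of: dpdk-vhost -l 0-3 -n 4 -- -p 0x1 --mergeable 1 --vm2vm 1 --socket-file /tmp/sock0 --dmas "[txd0@0000:00:04.0,rxd0@0000:00:04.1]" --client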
 
+enum {
+#define OPT_VM2VM               "vm2vm"
+       OPT_VM2VM_NUM = 256,
+#define OPT_RX_RETRY            "rx-retry"
+       OPT_RX_RETRY_NUM,
+#define OPT_RX_RETRY_DELAY      "rx-retry-delay"
+       OPT_RX_RETRY_DELAY_NUM,
+#define OPT_RX_RETRY_NUMB       "rx-retry-num"
+       OPT_RX_RETRY_NUMB_NUM,
+#define OPT_MERGEABLE           "mergeable"
+       OPT_MERGEABLE_NUM,
+#define OPT_STATS               "stats"
+       OPT_STATS_NUM,
+#define OPT_SOCKET_FILE         "socket-file"
+       OPT_SOCKET_FILE_NUM,
+#define OPT_TX_CSUM             "tx-csum"
+       OPT_TX_CSUM_NUM,
+#define OPT_TSO                 "tso"
+       OPT_TSO_NUM,
+#define OPT_CLIENT              "client"
+       OPT_CLIENT_NUM,
+#define OPT_BUILTIN_NET_DRIVER  "builtin-net-driver"
+       OPT_BUILTIN_NET_DRIVER_NUM,
+#define OPT_DMAS                "dmas"
+       OPT_DMAS_NUM,
+#define OPT_NUM_MBUFS           "total-num-mbufs"
+       OPT_NUM_MBUFS_NUM,
+};
+
 /*
  * Parse the arguments given in the command line of the application.
  */
@@ -474,18 +693,32 @@ us_vhost_parse_args(int argc, char **argv)
        unsigned i;
        const char *prgname = argv[0];
        static struct option long_option[] = {
-               {"vm2vm", required_argument, NULL, 0},
-               {"rx-retry", required_argument, NULL, 0},
-               {"rx-retry-delay", required_argument, NULL, 0},
-               {"rx-retry-num", required_argument, NULL, 0},
-               {"mergeable", required_argument, NULL, 0},
-               {"stats", required_argument, NULL, 0},
-               {"socket-file", required_argument, NULL, 0},
-               {"tx-csum", required_argument, NULL, 0},
-               {"tso", required_argument, NULL, 0},
-               {"client", no_argument, &client_mode, 1},
-               {"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
-               {"builtin-net-driver", no_argument, &builtin_net_driver, 1},
+               {OPT_VM2VM, required_argument,
+                               NULL, OPT_VM2VM_NUM},
+               {OPT_RX_RETRY, required_argument,
+                               NULL, OPT_RX_RETRY_NUM},
+               {OPT_RX_RETRY_DELAY, required_argument,
+                               NULL, OPT_RX_RETRY_DELAY_NUM},
+               {OPT_RX_RETRY_NUMB, required_argument,
+                               NULL, OPT_RX_RETRY_NUMB_NUM},
+               {OPT_MERGEABLE, required_argument,
+                               NULL, OPT_MERGEABLE_NUM},
+               {OPT_STATS, required_argument,
+                               NULL, OPT_STATS_NUM},
+               {OPT_SOCKET_FILE, required_argument,
+                               NULL, OPT_SOCKET_FILE_NUM},
+               {OPT_TX_CSUM, required_argument,
+                               NULL, OPT_TX_CSUM_NUM},
+               {OPT_TSO, required_argument,
+                               NULL, OPT_TSO_NUM},
+               {OPT_CLIENT, no_argument,
+                               NULL, OPT_CLIENT_NUM},
+               {OPT_BUILTIN_NET_DRIVER, no_argument,
+                               NULL, OPT_BUILTIN_NET_DRIVER_NUM},
+               {OPT_DMAS, required_argument,
+                               NULL, OPT_DMAS_NUM},
+               {OPT_NUM_MBUFS, required_argument,
+                               NULL, OPT_NUM_MBUFS_NUM},
                {NULL, 0, 0, 0},
        };
 
@@ -506,130 +739,135 @@ us_vhost_parse_args(int argc, char **argv)
                case 'P':
                        promiscuous = 1;
                        vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
-                               ETH_VMDQ_ACCEPT_BROADCAST |
-                               ETH_VMDQ_ACCEPT_MULTICAST;
+                               RTE_ETH_VMDQ_ACCEPT_BROADCAST |
+                               RTE_ETH_VMDQ_ACCEPT_MULTICAST;
+                       break;
 
+               case OPT_VM2VM_NUM:
+                       ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
+                       if (ret == -1) {
+                               RTE_LOG(INFO, VHOST_CONFIG,
+                                       "Invalid argument for "
+                                       "vm2vm [0|1|2]\n");
+                               us_vhost_usage(prgname);
+                               return -1;
+                       }
+                       vm2vm_mode = (vm2vm_type)ret;
                        break;
 
-               case 0:
-                       /* Enable/disable vm2vm comms. */
-                       if (!strncmp(long_option[option_index].name, "vm2vm",
-                               MAX_LONG_OPT_SZ)) {
-                               ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
-                               if (ret == -1) {
-                                       RTE_LOG(INFO, VHOST_CONFIG,
-                                               "Invalid argument for "
-                                               "vm2vm [0|1|2]\n");
-                                       us_vhost_usage(prgname);
-                                       return -1;
-                               } else {
-                                       vm2vm_mode = (vm2vm_type)ret;
-                               }
+               case OPT_RX_RETRY_NUM:
+                       ret = parse_num_opt(optarg, 1);
+                       if (ret == -1) {
+                               RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
+                               us_vhost_usage(prgname);
+                               return -1;
                        }
+                       enable_retry = ret;
+                       break;
 
-                       /* Enable/disable retries on RX. */
-                       if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
-                               ret = parse_num_opt(optarg, 1);
-                               if (ret == -1) {
-                                       RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
-                                       us_vhost_usage(prgname);
-                                       return -1;
-                               } else {
-                                       enable_retry = ret;
-                               }
+               case OPT_TX_CSUM_NUM:
+                       ret = parse_num_opt(optarg, 1);
+                       if (ret == -1) {
+                               RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
+                               us_vhost_usage(prgname);
+                               return -1;
                        }
+                       enable_tx_csum = ret;
+                       break;
 
-                       /* Enable/disable TX checksum offload. */
-                       if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
-                               ret = parse_num_opt(optarg, 1);
-                               if (ret == -1) {
-                                       RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
-                                       us_vhost_usage(prgname);
-                                       return -1;
-                               } else
-                                       enable_tx_csum = ret;
+               case OPT_TSO_NUM:
+                       ret = parse_num_opt(optarg, 1);
+                       if (ret == -1) {
+                               RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
+                               us_vhost_usage(prgname);
+                               return -1;
                        }
+                       enable_tso = ret;
+                       break;
 
-                       /* Enable/disable TSO offload. */
-                       if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
-                               ret = parse_num_opt(optarg, 1);
-                               if (ret == -1) {
-                                       RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
-                                       us_vhost_usage(prgname);
-                                       return -1;
-                               } else
-                                       enable_tso = ret;
+               case OPT_RX_RETRY_DELAY_NUM:
+                       ret = parse_num_opt(optarg, INT32_MAX);
+                       if (ret == -1) {
+                               RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
+                               us_vhost_usage(prgname);
+                               return -1;
                        }
+                       burst_rx_delay_time = ret;
+                       break;
 
-                       /* Specify the retries delay time (in useconds) on RX. */
-                       if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
-                               ret = parse_num_opt(optarg, INT32_MAX);
-                               if (ret == -1) {
-                                       RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
-                                       us_vhost_usage(prgname);
-                                       return -1;
-                               } else {
-                                       burst_rx_delay_time = ret;
-                               }
+               case OPT_RX_RETRY_NUMB_NUM:
+                       ret = parse_num_opt(optarg, INT32_MAX);
+                       if (ret == -1) {
+                               RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
+                               us_vhost_usage(prgname);
+                               return -1;
                        }
+                       burst_rx_retry_num = ret;
+                       break;
 
-                       /* Specify the retries number on RX. */
-                       if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
-                               ret = parse_num_opt(optarg, INT32_MAX);
-                               if (ret == -1) {
-                                       RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
-                                       us_vhost_usage(prgname);
-                                       return -1;
-                               } else {
-                                       burst_rx_retry_num = ret;
-                               }
+               case OPT_MERGEABLE_NUM:
+                       ret = parse_num_opt(optarg, 1);
+                       if (ret == -1) {
+                               RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
+                               us_vhost_usage(prgname);
+                               return -1;
                        }
+                       mergeable = !!ret;
+                       break;
 
-                       /* Enable/disable RX mergeable buffers. */
-                       if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
-                               ret = parse_num_opt(optarg, 1);
-                               if (ret == -1) {
-                                       RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
-                                       us_vhost_usage(prgname);
-                                       return -1;
-                               } else {
-                                       mergeable = !!ret;
-                                       if (ret) {
-                                               vmdq_conf_default.rxmode.jumbo_frame = 1;
-                                               vmdq_conf_default.rxmode.max_rx_pkt_len
-                                                       = JUMBO_FRAME_MAX_SIZE;
-                                       }
-                               }
+               case OPT_STATS_NUM:
+                       ret = parse_num_opt(optarg, INT32_MAX);
+                       if (ret == -1) {
+                               RTE_LOG(INFO, VHOST_CONFIG,
+                                       "Invalid argument for stats [0..N]\n");
+                               us_vhost_usage(prgname);
+                               return -1;
                        }
+                       enable_stats = ret;
+                       break;
+
+               /* Set socket file path. */
+               case OPT_SOCKET_FILE_NUM:
+                       if (us_vhost_parse_socket_path(optarg) == -1) {
+                               RTE_LOG(INFO, VHOST_CONFIG,
+                               "Invalid argument for socket name (Max %d characters)\n",
+                               PATH_MAX);
+                               us_vhost_usage(prgname);
+                               return -1;
+                       }
+                       break;
 
-                       /* Enable/disable stats. */
-                       if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
-                               ret = parse_num_opt(optarg, INT32_MAX);
-                               if (ret == -1) {
-                                       RTE_LOG(INFO, VHOST_CONFIG,
-                                               "Invalid argument for stats [0..N]\n");
-                                       us_vhost_usage(prgname);
-                                       return -1;
-                               } else {
-                                       enable_stats = ret;
-                               }
+               case OPT_DMAS_NUM:
+                       if (open_dma(optarg) == -1) {
+                               RTE_LOG(INFO, VHOST_CONFIG,
+                                       "Wrong DMA args\n");
+                               us_vhost_usage(prgname);
+                               return -1;
                        }
+                       break;
 
-                       /* Set socket file path. */
-                       if (!strncmp(long_option[option_index].name,
-                                               "socket-file", MAX_LONG_OPT_SZ)) {
-                               if (us_vhost_parse_socket_path(optarg) == -1) {
-                                       RTE_LOG(INFO, VHOST_CONFIG,
-                                       "Invalid argument for socket name (Max %d characters)\n",
-                                       PATH_MAX);
-                                       us_vhost_usage(prgname);
-                                       return -1;
-                               }
+               case OPT_NUM_MBUFS_NUM:
+                       ret = parse_num_opt(optarg, INT32_MAX);
+                       if (ret == -1) {
+                               RTE_LOG(INFO, VHOST_CONFIG,
+                                       "Invalid argument for total-num-mbufs [0..N]\n");
+                               us_vhost_usage(prgname);
+                               return -1;
                        }
 
+                       if (total_num_mbufs < ret)
+                               total_num_mbufs = ret;
+                       break;
+
+               case OPT_CLIENT_NUM:
+                       client_mode = 1;
+                       break;
+
+               case OPT_BUILTIN_NET_DRIVER_NUM:
+                       builtin_net_driver = 1;
                        break;
 
-                       /* Invalid option - print options. */
+               /* Invalid option - print options. */
                default:
                        us_vhost_usage(prgname);
                        return -1;
@@ -666,9 +904,10 @@ static unsigned check_ports_num(unsigned nb_ports)
        }
 
        for (portid = 0; portid < num_ports; portid ++) {
-               if (ports[portid] >= nb_ports) {
-                       RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
-                               ports[portid], (nb_ports - 1));
+               if (!rte_eth_dev_is_valid_port(ports[portid])) {
+                       RTE_LOG(INFO, VHOST_PORT,
+                               "\nSpecified port ID(%u) is not valid\n",
+                               ports[portid]);
                        ports[portid] = INVALID_PORT_ID;
                        valid_num_ports--;
                }
@@ -677,13 +916,13 @@ static unsigned check_ports_num(unsigned nb_ports)
 }
 
 static __rte_always_inline struct vhost_dev *
-find_vhost_dev(struct ether_addr *mac)
+find_vhost_dev(struct rte_ether_addr *mac)
 {
        struct vhost_dev *vdev;
 
        TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
                if (vdev->ready == DEVICE_RX &&
-                   is_same_ether_addr(mac, &vdev->mac_address))
+                   rte_is_same_ether_addr(mac, &vdev->mac_address))
                        return vdev;
        }
 
@@ -697,32 +936,30 @@ find_vhost_dev(struct ether_addr *mac)
 static int
 link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
 {
-       struct ether_hdr *pkt_hdr;
+       struct rte_ether_hdr *pkt_hdr;
        int i, ret;
 
        /* Learn MAC address of guest device from packet */
-       pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+       pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
-       if (find_vhost_dev(&pkt_hdr->s_addr)) {
+       if (find_vhost_dev(&pkt_hdr->src_addr)) {
                RTE_LOG(ERR, VHOST_DATA,
                        "(%d) device is using a registered MAC!\n",
                        vdev->vid);
                return -1;
        }
 
-       for (i = 0; i < ETHER_ADDR_LEN; i++)
-               vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];
+       for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
+               vdev->mac_address.addr_bytes[i] =
+                       pkt_hdr->src_addr.addr_bytes[i];
 
        /* vlan_tag currently uses the device_id. */
        vdev->vlan_tag = vlan_tags[vdev->vid];
 
        /* Print out VMDQ registration info. */
        RTE_LOG(INFO, VHOST_DATA,
-               "(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
-               vdev->vid,
-               vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
-               vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
-               vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
+               "(%d) mac " RTE_ETHER_ADDR_PRT_FMT " and vlan %d registered\n",
+               vdev->vid, RTE_ETHER_ADDR_BYTES(&vdev->mac_address),
                vdev->vlan_tag);
 
        /* Register the MAC address. */
@@ -776,8 +1013,29 @@ unlink_vmdq(struct vhost_dev *vdev)
        }
 }
 
+static inline void
+free_pkts(struct rte_mbuf **pkts, uint16_t n)
+{
+       while (n--)
+               rte_pktmbuf_free(pkts[n]);
+}
+
+static __rte_always_inline void
+complete_async_pkts(struct vhost_dev *vdev)
+{
+       struct rte_mbuf *p_cpl[MAX_PKT_BURST];
+       uint16_t complete_count;
+       int16_t dma_id = dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].dev_id;
+
+       complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
+                                       VIRTIO_RXQ, p_cpl, MAX_PKT_BURST, dma_id, 0);
+       if (complete_count)
+               free_pkts(p_cpl, complete_count);
+
+}
+
 static __rte_always_inline void
-virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
+sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
            struct rte_mbuf *m)
 {
        uint16_t ret;
@@ -789,13 +1047,63 @@ virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
        }
 
        if (enable_stats) {
-               rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
-               rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
+               __atomic_add_fetch(&dst_vdev->stats.rx_total_atomic, 1,
+                               __ATOMIC_SEQ_CST);
+               __atomic_add_fetch(&dst_vdev->stats.rx_atomic, ret,
+                               __ATOMIC_SEQ_CST);
                src_vdev->stats.tx_total++;
                src_vdev->stats.tx += ret;
        }
 }
 
+static __rte_always_inline void
+drain_vhost(struct vhost_dev *vdev)
+{
+       uint16_t ret;
+       uint32_t buff_idx = rte_lcore_id() * RTE_MAX_VHOST_DEVICE + vdev->vid;
+       uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
+       struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
+
+       ret = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev, VIRTIO_RXQ, m, nr_xmit);
+
+       if (enable_stats) {
+               __atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
+                               __ATOMIC_SEQ_CST);
+               __atomic_add_fetch(&vdev->stats.rx_atomic, ret,
+                               __ATOMIC_SEQ_CST);
+       }
+
+       if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled)
+               free_pkts(m, nr_xmit);
+}
+
+static __rte_always_inline void
+drain_vhost_table(void)
+{
+       uint16_t lcore_id = rte_lcore_id();
+       struct vhost_bufftable *vhost_txq;
+       struct vhost_dev *vdev;
+       uint64_t cur_tsc;
+
+       TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+               if (unlikely(vdev->remove == 1))
+                       continue;
+
+               vhost_txq = vhost_txbuff[lcore_id * RTE_MAX_VHOST_DEVICE + vdev->vid];
+
+               cur_tsc = rte_rdtsc();
+               if (unlikely(cur_tsc - vhost_txq->pre_tsc
+                               > MBUF_TABLE_DRAIN_TSC)) {
+                       RTE_LOG_DP(DEBUG, VHOST_DATA,
+                               "Vhost TX queue drained after timeout with burst size %u\n",
+                               vhost_txq->len);
+                       drain_vhost(vdev);
+                       vhost_txq->len = 0;
+                       vhost_txq->pre_tsc = cur_tsc;
+               }
+       }
+}
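The MBUF_TABLE_DRAIN_TSC threshold used above converts the drain interval from microseconds into TSC cycles. As a worked example under assumed numbers (a 2.0 GHz TSC and a BURST_TX_DRAIN_US of 100), that comes to roughly 2000 cycles per microsecond times the interval, i.e. about 200000 cycles, so a partially filled per-device TX buffer is flushed after at most about 100 microseconds.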
+
 /*
  * Check if the packet destination MAC address is for a local device. If so then put
  * the packet on that devices RX queue. If not then return.
@@ -803,12 +1111,13 @@ virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
 static __rte_always_inline int
 virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
 {
-       struct ether_hdr *pkt_hdr;
+       struct rte_ether_hdr *pkt_hdr;
        struct vhost_dev *dst_vdev;
+       struct vhost_bufftable *vhost_txq;
+       uint16_t lcore_id = rte_lcore_id();
+       pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
-       pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
-
-       dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
+       dst_vdev = find_vhost_dev(&pkt_hdr->dst_addr);
        if (!dst_vdev)
                return -1;
 
@@ -828,7 +1137,19 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
                return 0;
        }
 
-       virtio_xmit(dst_vdev, vdev, m);
+       vhost_txq = vhost_txbuff[lcore_id * RTE_MAX_VHOST_DEVICE + dst_vdev->vid];
+       vhost_txq->m_table[vhost_txq->len++] = m;
+
+       if (enable_stats) {
+               vdev->stats.tx_total++;
+               vdev->stats.tx++;
+       }
+
+       if (unlikely(vhost_txq->len == MAX_PKT_BURST)) {
+               drain_vhost(dst_vdev);
+               vhost_txq->len = 0;
+               vhost_txq->pre_tsc = rte_rdtsc();
+       }
        return 0;
 }
 
@@ -841,9 +1162,10 @@ find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
        uint32_t *offset, uint16_t *vlan_tag)
 {
        struct vhost_dev *dst_vdev;
-       struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+       struct rte_ether_hdr *pkt_hdr =
+               rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
-       dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
+       dst_vdev = find_vhost_dev(&pkt_hdr->dst_addr);
        if (!dst_vdev)
                return 0;
 
@@ -859,7 +1181,7 @@ find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
         * by minus length of vlan tag, so need restore
         * the packet length by plus it.
         */
-       *offset  = VLAN_HLEN;
+       *offset  = RTE_VLAN_HLEN;
        *vlan_tag = vlan_tags[vdev->vid];
 
        RTE_LOG_DP(DEBUG, VHOST_DATA,
@@ -869,39 +1191,34 @@ find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
        return 0;
 }
 
-static uint16_t
-get_psd_sum(void *l3_hdr, uint64_t ol_flags)
-{
-       if (ol_flags & PKT_TX_IPV4)
-               return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
-       else /* assume ethertype == ETHER_TYPE_IPv6 */
-               return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
-}
-
 static void virtio_tx_offload(struct rte_mbuf *m)
 {
+       struct rte_net_hdr_lens hdr_lens;
+       struct rte_ipv4_hdr *ipv4_hdr;
+       struct rte_tcp_hdr *tcp_hdr;
+       uint32_t ptype;
        void *l3_hdr;
-       struct ipv4_hdr *ipv4_hdr = NULL;
-       struct tcp_hdr *tcp_hdr = NULL;
-       struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
 
-       l3_hdr = (char *)eth_hdr + m->l2_len;
+       ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
+       m->l2_len = hdr_lens.l2_len;
+       m->l3_len = hdr_lens.l3_len;
+       m->l4_len = hdr_lens.l4_len;
+
+       l3_hdr = rte_pktmbuf_mtod_offset(m, void *, m->l2_len);
+       tcp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
+               m->l2_len + m->l3_len);
 
-       if (m->ol_flags & PKT_TX_IPV4) {
+       m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
+       if ((ptype & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) {
+               m->ol_flags |= RTE_MBUF_F_TX_IPV4;
+               m->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
                ipv4_hdr = l3_hdr;
                ipv4_hdr->hdr_checksum = 0;
-               m->ol_flags |= PKT_TX_IP_CKSUM;
+               tcp_hdr->cksum = rte_ipv4_phdr_cksum(l3_hdr, m->ol_flags);
+       } else { /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
+               m->ol_flags |= RTE_MBUF_F_TX_IPV6;
+               tcp_hdr->cksum = rte_ipv6_phdr_cksum(l3_hdr, m->ol_flags);
        }
-
-       tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
-       tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
-}
-
-static inline void
-free_pkts(struct rte_mbuf **pkts, uint16_t n)
-{
-       while (n--)
-               rte_pktmbuf_free(pkts[n]);
 }
 
 static __rte_always_inline void
@@ -927,24 +1244,23 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
        struct mbuf_table *tx_q;
        unsigned offset = 0;
        const uint16_t lcore_id = rte_lcore_id();
-       struct ether_hdr *nh;
+       struct rte_ether_hdr *nh;
 
 
-       nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
-       if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
+       nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+       if (unlikely(rte_is_broadcast_ether_addr(&nh->dst_addr))) {
                struct vhost_dev *vdev2;
 
                TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
-                       virtio_xmit(vdev2, vdev, m);
+                       if (vdev2 != vdev)
+                               sync_virtio_xmit(vdev2, vdev, m);
                }
                goto queue2nic;
        }
 
        /*check if destination is local VM*/
-       if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
-               rte_pktmbuf_free(m);
+       if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0))
                return;
-       }
 
        if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
                if (unlikely(find_local_dest(vdev, m, &offset,
@@ -962,16 +1278,16 @@ queue2nic:
        /*Add packet to the port tx queue*/
        tx_q = &lcore_tx_queue[lcore_id];
 
-       nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
-       if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
+       nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+       if (unlikely(nh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))) {
                /* Guest has inserted the vlan tag. */
-               struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
+               struct rte_vlan_hdr *vh = (struct rte_vlan_hdr *) (nh + 1);
                uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
                if ((vm2vm_mode == VM2VM_HARDWARE) &&
                        (vh->vlan_tci != vlan_tag_be))
                        vh->vlan_tci = vlan_tag_be;
        } else {
-               m->ol_flags |= PKT_TX_VLAN_PKT;
+               m->ol_flags |= RTE_MBUF_F_TX_VLAN;
 
                /*
                 * Find the right seg to adjust the data len when offset is
@@ -995,7 +1311,7 @@ queue2nic:
                m->vlan_tci = vlan_tag;
        }
 
-       if (m->ol_flags & PKT_TX_TCP_SEG)
+       if (m->ol_flags & RTE_MBUF_F_RX_LRO)
                virtio_tx_offload(m);
 
        tx_q->m_table[tx_q->len++] = m;
@@ -1029,6 +1345,32 @@ drain_mbuf_table(struct mbuf_table *tx_q)
        }
 }
 
+uint16_t
+async_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+               struct rte_mbuf **pkts, uint32_t rx_count)
+{
+       uint16_t enqueue_count;
+       uint16_t enqueue_fail = 0;
+       uint16_t dma_id = dma_bind[vid2socketid[dev->vid]].dmas[VIRTIO_RXQ].dev_id;
+
+       complete_async_pkts(dev);
+       enqueue_count = rte_vhost_submit_enqueue_burst(dev->vid, queue_id,
+                                       pkts, rx_count, dma_id, 0);
+
+       enqueue_fail = rx_count - enqueue_count;
+       if (enqueue_fail)
+               free_pkts(&pkts[enqueue_count], enqueue_fail);
+
+       return enqueue_count;
+}
+
+uint16_t
+sync_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+               struct rte_mbuf **pkts, uint32_t rx_count)
+{
+       return rte_vhost_enqueue_burst(dev->vid, queue_id, pkts, rx_count);
+}
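One point worth making explicit about the async path above: mbufs accepted by rte_vhost_submit_enqueue_burst() are owned by the vhost library until a completion poll hands them back, which is why drain_vhost() and drain_eth_rx() skip free_pkts() whenever async_enabled is set. A minimal sketch of that lifecycle, assuming a registered device vid already bound to DMA channel dma_id (the function and variable names here are illustrative, not part of the patch):

    static void
    async_enqueue_lifecycle_sketch(int vid, int16_t dma_id,
                                   struct rte_mbuf **pkts, uint16_t count)
    {
            struct rte_mbuf *done[MAX_PKT_BURST];
            uint16_t n_enq, n_done;

            /* Submit copies; only the rejected tail is still ours to free. */
            n_enq = rte_vhost_submit_enqueue_burst(vid, VIRTIO_RXQ,
                                                   pkts, count, dma_id, 0);
            free_pkts(&pkts[n_enq], count - n_enq);

            /* Later (e.g. on the next polling iteration): reclaim and free
             * whatever the DMA engine has finished copying. */
            n_done = rte_vhost_poll_enqueue_completed(vid, VIRTIO_RXQ, done,
                                                      MAX_PKT_BURST, dma_id, 0);
            free_pkts(done, n_done);
    }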
+
 static __rte_always_inline void
 drain_eth_rx(struct vhost_dev *vdev)
 {
@@ -1037,6 +1379,7 @@ drain_eth_rx(struct vhost_dev *vdev)
 
        rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
                                    pkts, MAX_PKT_BURST);
+
        if (!rx_count)
                return;
 
@@ -1058,19 +1401,39 @@ drain_eth_rx(struct vhost_dev *vdev)
                }
        }
 
-       if (builtin_net_driver) {
-               enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
-                                               pkts, rx_count);
-       } else {
-               enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
-                                               pkts, rx_count);
-       }
+       enqueue_count = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev,
+                                       VIRTIO_RXQ, pkts, rx_count);
+
        if (enable_stats) {
-               rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
-               rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
+               __atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
+                               __ATOMIC_SEQ_CST);
+               __atomic_add_fetch(&vdev->stats.rx_atomic, enqueue_count,
+                               __ATOMIC_SEQ_CST);
        }
 
-       free_pkts(pkts, rx_count);
+       if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled)
+               free_pkts(pkts, rx_count);
+}
+
+uint16_t async_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+                           struct rte_mempool *mbuf_pool,
+                           struct rte_mbuf **pkts, uint16_t count)
+{
+       int nr_inflight;
+       uint16_t dequeue_count;
+       int16_t dma_id = dma_bind[vid2socketid[dev->vid]].dmas[VIRTIO_TXQ].dev_id;
+
+       dequeue_count = rte_vhost_async_try_dequeue_burst(dev->vid, queue_id,
+                       mbuf_pool, pkts, count, &nr_inflight, dma_id, 0);
+
+       return dequeue_count;
+}
+
+uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+                          struct rte_mempool *mbuf_pool,
+                          struct rte_mbuf **pkts, uint16_t count)
+{
+       return rte_vhost_dequeue_burst(dev->vid, queue_id, mbuf_pool, pkts, count);
 }
 
 static __rte_always_inline void
@@ -1080,13 +1443,8 @@ drain_virtio_tx(struct vhost_dev *vdev)
        uint16_t count;
        uint16_t i;
 
-       if (builtin_net_driver) {
-               count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
-                                       pkts, MAX_PKT_BURST);
-       } else {
-               count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
-                                       mbuf_pool, pkts, MAX_PKT_BURST);
-       }
+       count = vdev_queue_ops[vdev->vid].dequeue_pkt_burst(vdev,
+                               VIRTIO_TXQ, mbuf_pool, pkts, MAX_PKT_BURST);
 
        /* setup VMDq for the first packet */
        if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
@@ -1123,7 +1481,7 @@ switch_worker(void *arg __rte_unused)
        struct vhost_dev *vdev;
        struct mbuf_table *tx_q;
 
-       RTE_LOG(INFO, VHOST_DATA, "Procesing on Core %u started\n", lcore_id);
+       RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
 
        tx_q = &lcore_tx_queue[lcore_id];
        for (i = 0; i < rte_lcore_count(); i++) {
@@ -1135,7 +1493,7 @@ switch_worker(void *arg __rte_unused)
 
        while(1) {
                drain_mbuf_table(tx_q);
-
+               drain_vhost_table();
                /*
                 * Inform the configuration core that we have exited the
                 * linked list and that no devices are in use if requested.
@@ -1165,9 +1523,29 @@ switch_worker(void *arg __rte_unused)
        return 0;
 }
 
+static void
+vhost_clear_queue_thread_unsafe(struct vhost_dev *vdev, uint16_t queue_id)
+{
+       uint16_t n_pkt = 0;
+       int pkts_inflight;
+
+       int16_t dma_id = dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dev_id;
+       pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vdev->vid, queue_id);
+
+       struct rte_mbuf *m_cpl[pkts_inflight];
+
+       while (pkts_inflight) {
+               n_pkt = rte_vhost_clear_queue_thread_unsafe(vdev->vid, queue_id, m_cpl,
+                                                       pkts_inflight, dma_id, 0);
+               free_pkts(m_cpl, n_pkt);
+               pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vdev->vid,
+                                                                       queue_id);
+       }
+}
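destroy_device() below uses this helper to reclaim and free any copies the DMA engine still has in flight before it unregisters the async channels.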
+
 /*
  * Remove a device from the specific data core linked list and from the
- * main linked list. Synchonization  occurs through the use of the
+ * main linked list. Synchronization  occurs through the use of the
  * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
  * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
  */
@@ -1176,6 +1554,7 @@ destroy_device(int vid)
 {
        struct vhost_dev *vdev = NULL;
        int lcore;
+       uint16_t i;
 
        TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
                if (vdev->vid == vid)
@@ -1189,6 +1568,9 @@ destroy_device(int vid)
                rte_pause();
        }
 
+       for (i = 0; i < RTE_MAX_LCORE; i++)
+               rte_free(vhost_txbuff[i * RTE_MAX_VHOST_DEVICE + vid]);
+
        if (builtin_net_driver)
                vs_vhost_net_remove(vdev);
 
@@ -1198,7 +1580,7 @@ destroy_device(int vid)
 
 
        /* Set the dev_removal_flag on each lcore. */
-       RTE_LCORE_FOREACH_SLAVE(lcore)
+       RTE_LCORE_FOREACH_WORKER(lcore)
                lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;
 
        /*
@@ -1206,7 +1588,7 @@ destroy_device(int vid)
         * we can be sure that they can no longer access the device removed
         * from the linked lists and that the devices are no longer in use.
         */
-       RTE_LCORE_FOREACH_SLAVE(lcore) {
+       RTE_LCORE_FOREACH_WORKER(lcore) {
                while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
                        rte_pause();
        }
@@ -1217,19 +1599,92 @@ destroy_device(int vid)
                "(%d) device has been removed from data core\n",
                vdev->vid);
 
+       if (dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled) {
+               vhost_clear_queue_thread_unsafe(vdev, VIRTIO_RXQ);
+               rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
+               dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = false;
+       }
+
+       if (dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled) {
+               vhost_clear_queue_thread_unsafe(vdev, VIRTIO_TXQ);
+               rte_vhost_async_channel_unregister(vid, VIRTIO_TXQ);
+               dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled = false;
+       }
+
        rte_free(vdev);
 }
 
+static inline int
+get_socketid_by_vid(int vid)
+{
+       int i;
+       char ifname[PATH_MAX];
+       rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+       for (i = 0; i < nb_sockets; i++) {
+               char *file = socket_files + i * PATH_MAX;
+               if (strcmp(file, ifname) == 0)
+                       return i;
+       }
+
+       return -1;
+}
+
+static int
+init_vhost_queue_ops(int vid)
+{
+       if (builtin_net_driver) {
+               vdev_queue_ops[vid].enqueue_pkt_burst = builtin_enqueue_pkts;
+               vdev_queue_ops[vid].dequeue_pkt_burst = builtin_dequeue_pkts;
+       } else {
+               if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].async_enabled)
+                       vdev_queue_ops[vid].enqueue_pkt_burst = async_enqueue_pkts;
+               else
+                       vdev_queue_ops[vid].enqueue_pkt_burst = sync_enqueue_pkts;
+
+               if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].async_enabled)
+                       vdev_queue_ops[vid].dequeue_pkt_burst = async_dequeue_pkts;
+               else
+                       vdev_queue_ops[vid].dequeue_pkt_burst = sync_dequeue_pkts;
+       }
+
+       return 0;
+}
+
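+/*
+ * Register an async channel on each queue that has a DMA device bound to
+ * this device's socket; a queue is marked async_enabled only on success.
+ */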
+static inline int
+vhost_async_channel_register(int vid)
+{
+       int rx_ret = 0, tx_ret = 0;
+
+       if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].dev_id != INVALID_DMA_ID) {
+               rx_ret = rte_vhost_async_channel_register(vid, VIRTIO_RXQ);
+               if (rx_ret == 0)
+                       dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].async_enabled = true;
+       }
+
+       if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].dev_id != INVALID_DMA_ID) {
+               tx_ret = rte_vhost_async_channel_register(vid, VIRTIO_TXQ);
+               if (tx_ret == 0)
+                       dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].async_enabled = true;
+       }
+
+       return rx_ret | tx_ret;
+}
+
 /*
  * A new device is added to a data core. First the device is added to the main linked list
- * and the allocated to a specific data core.
+ * and then allocated to a specific data core.
  */
 static int
 new_device(int vid)
 {
        int lcore, core_add = 0;
+       uint16_t i;
        uint32_t device_num_min = num_devices;
        struct vhost_dev *vdev;
+       int ret;
 
        vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
        if (vdev == NULL) {
@@ -1240,6 +1695,30 @@ new_device(int vid)
        }
        vdev->vid = vid;
 
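+       /* One TX buffer per (lcore, device) pair, used to batch packets enqueued to this device. */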
+       for (i = 0; i < RTE_MAX_LCORE; i++) {
+               vhost_txbuff[i * RTE_MAX_VHOST_DEVICE + vid]
+                       = rte_zmalloc("vhost bufftable",
+                               sizeof(struct vhost_bufftable),
+                               RTE_CACHE_LINE_SIZE);
+
+               if (vhost_txbuff[i * RTE_MAX_VHOST_DEVICE + vid] == NULL) {
+                       RTE_LOG(INFO, VHOST_DATA,
+                         "(%d) couldn't allocate memory for vhost TX\n", vid);
+                       return -1;
+               }
+       }
+
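+       /* Resolve the device's socket so per-socket DMA bindings can be looked up by vid. */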
+       int socketid = get_socketid_by_vid(vid);
+       if (socketid == -1)
+               return -1;
+
+       init_vid2socketid_array(vid, socketid);
+
+       ret = vhost_async_channel_register(vid);
+
+       if (init_vhost_queue_ops(vid) != 0)
+               return -1;
+
        if (builtin_net_driver)
                vs_vhost_net_setup(vdev);
 
@@ -1251,7 +1730,7 @@ new_device(int vid)
        vdev->remove = 0;
 
        /* Find a suitable lcore to add the device. */
-       RTE_LCORE_FOREACH_SLAVE(lcore) {
+       RTE_LCORE_FOREACH_WORKER(lcore) {
                if (lcore_info[lcore].device_num < device_num_min) {
                        device_num_min = lcore_info[lcore].device_num;
                        core_add = lcore;
@@ -1271,6 +1750,29 @@ new_device(int vid)
                "(%d) device has been added to data core %d\n",
                vid, vdev->coreid);
 
+       return ret;
+}
+
+static int
+vring_state_changed(int vid, uint16_t queue_id, int enable)
+{
+       struct vhost_dev *vdev = NULL;
+
+       TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+               if (vdev->vid == vid)
+                       break;
+       }
+       if (!vdev)
+               return -1;
+
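+       /* Only the RX queue is handled here: when an async-enabled RX queue
+        * is disabled, its in-flight copies must be drained.
+        */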
+       if (queue_id != VIRTIO_RXQ)
+               return 0;
+
+       if (dma_bind[vid2socketid[vid]].dmas[queue_id].async_enabled) {
+               if (!enable)
+                       vhost_clear_queue_thread_unsafe(vdev, queue_id);
+       }
+
        return 0;
 }
 
@@ -1278,18 +1780,19 @@ new_device(int vid)
  * These callbacks allow devices to be added to the data core when configuration
  * has been fully completed.
  */
-static const struct vhost_device_ops virtio_net_device_ops =
+static const struct rte_vhost_device_ops virtio_net_device_ops =
 {
        .new_device =  new_device,
        .destroy_device = destroy_device,
+       .vring_state_changed = vring_state_changed,
 };
 
 /*
  * This is a thread that wakes up periodically to print stats if the user has
  * enabled them.
  */
-static void
-print_stats(void)
+static void *
+print_stats(__rte_unused void *arg)
 {
        struct vhost_dev *vdev;
        uint64_t tx_dropped, rx_dropped;
@@ -1309,8 +1812,10 @@ print_stats(void)
                        tx         = vdev->stats.tx;
                        tx_dropped = tx_total - tx;
 
-                       rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
-                       rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
+                       rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic,
+                               __ATOMIC_SEQ_CST);
+                       rx         = __atomic_load_n(&vdev->stats.rx_atomic,
+                               __ATOMIC_SEQ_CST);
                        rx_dropped = rx_total - rx;
 
                        printf("Statistics for device %d\n"
@@ -1327,7 +1832,11 @@ print_stats(void)
                }
 
                printf("===================================================\n");
+
+               fflush(stdout);
        }
+
+       return NULL;
 }
 
 static void
@@ -1354,55 +1863,22 @@ sigint_handler(__rte_unused int signum)
        exit(0);
 }
 
-/*
- * While creating an mbuf pool, one key thing is to figure out how
- * many mbuf entries is enough for our use. FYI, here are some
- * guidelines:
- *
- * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
- *
- * - For each switch core (A CPU core does the packet switch), we need
- *   also make some reservation for receiving the packets from virtio
- *   Tx queue. How many is enough depends on the usage. It's normally
- *   a simple calculation like following:
- *
- *       MAX_PKT_BURST * max packet size / mbuf size
- *
- *   So, we definitely need allocate more mbufs when TSO is enabled.
- *
- * - Similarly, for each switching core, we should serve @nr_rx_desc
- *   mbufs for receiving the packets from physical NIC device.
- *
- * - We also need make sure, for each switch core, we have allocated
- *   enough mbufs to fill up the mbuf cache.
- */
 static void
-create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
-       uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
+reset_dma(void)
 {
-       uint32_t nr_mbufs;
-       uint32_t nr_mbufs_per_core;
-       uint32_t mtu = 1500;
-
-       if (mergeable)
-               mtu = 9000;
-       if (enable_tso)
-               mtu = 64 * 1024;
-
-       nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
-                       (mbuf_size - RTE_PKTMBUF_HEADROOM);
-       nr_mbufs_per_core += nr_rx_desc;
-       nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
-
-       nr_mbufs  = nr_queues * nr_rx_desc;
-       nr_mbufs += nr_mbufs_per_core * nr_switch_core;
-       nr_mbufs *= nr_port;
-
-       mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
-                                           nr_mbuf_cache, 0, mbuf_size,
-                                           rte_socket_id());
-       if (mbuf_pool == NULL)
-               rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
+       int i;
+
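+       /* No DMA device is bound to any vhost queue yet; the command-line DMA bindings fill these in. */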
+       for (i = 0; i < RTE_MAX_VHOST_DEVICE; i++) {
+               int j;
+
+               for (j = 0; j < RTE_MAX_QUEUES_PER_PORT * 2; j++) {
+                       dma_bind[i].dmas[j].dev_id = INVALID_DMA_ID;
+                       dma_bind[i].dmas[j].async_enabled = false;
+               }
+       }
+
+       for (i = 0; i < RTE_DMADEV_DEFAULT_MAX; i++)
+               dmas_id[i] = INVALID_DMA_ID;
 }
 
 /*
@@ -1416,8 +1892,7 @@ main(int argc, char *argv[])
        int ret, i;
        uint16_t portid;
        static pthread_t tid;
-       char thread_name[RTE_MAX_THREAD_NAME_LEN];
-       uint64_t flags = 0;
+       uint64_t flags = RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS;
 
        signal(SIGINT, sigint_handler);
 
@@ -1428,6 +1903,9 @@ main(int argc, char *argv[])
        argc -= ret;
        argv += ret;
 
+       /* initialize dma structures */
+       reset_dma();
+
        /* parse app arguments */
        ret = us_vhost_parse_args(argc, argv);
        if (ret < 0)
@@ -1444,7 +1922,7 @@ main(int argc, char *argv[])
                rte_exit(EXIT_FAILURE,"Not enough cores\n");
 
        /* Get the number of physical ports. */
-       nb_ports = rte_eth_dev_count();
+       nb_ports = rte_eth_dev_count_avail();
 
        /*
         * Update the global var NUM_PORTS and global array PORTS
@@ -1464,8 +1942,11 @@ main(int argc, char *argv[])
         * many queues here. We probably should only do allocation for
         * those queues we are going to use.
         */
-       create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
-                        MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
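+       /* One mbuf pool is shared by all ports and cores; its size comes from total_num_mbufs. */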
+       mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", total_num_mbufs,
+                                           MBUF_CACHE_SIZE, 0, MBUF_DATA_SIZE,
+                                           rte_socket_id());
+       if (mbuf_pool == NULL)
+               rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
 
        if (vm2vm_mode == VM2VM_HARDWARE) {
                /* Enable VT loop back to let L2 switch to do it. */
@@ -1475,7 +1956,7 @@ main(int argc, char *argv[])
        }
 
        /* initialize all ports */
-       for (portid = 0; portid < nb_ports; portid++) {
+       RTE_ETH_FOREACH_DEV(portid) {
                /* skip ports that are not enabled */
                if ((enabled_port_mask & (1 << portid)) == 0) {
                        RTE_LOG(INFO, VHOST_PORT,
@@ -1489,32 +1970,34 @@ main(int argc, char *argv[])
 
        /* Enable stats if the user option is set. */
        if (enable_stats) {
-               ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
-               if (ret != 0)
+               ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
+                                       print_stats, NULL);
+               if (ret < 0)
                        rte_exit(EXIT_FAILURE,
                                "Cannot create print-stats thread\n");
-
-               /* Set thread_name for aid in debugging.  */
-               snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
-               ret = rte_thread_setname(tid, thread_name);
-               if (ret != 0)
-                       RTE_LOG(DEBUG, VHOST_CONFIG,
-                               "Cannot set print-stats name\n");
        }
 
        /* Launch all data cores. */
-       RTE_LCORE_FOREACH_SLAVE(lcore_id)
+       RTE_LCORE_FOREACH_WORKER(lcore_id)
                rte_eal_remote_launch(switch_worker, NULL, lcore_id);
 
        if (client_mode)
                flags |= RTE_VHOST_USER_CLIENT;
 
-       if (dequeue_zero_copy)
-               flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
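+       /* Make each DMA device given on the command line usable by the vhost async data path. */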
+       for (i = 0; i < dma_count; i++) {
+               if (rte_vhost_async_dma_configure(dmas_id[i], 0) < 0) {
+                       RTE_LOG(ERR, VHOST_PORT, "Failed to configure DMA in vhost.\n");
+                       rte_exit(EXIT_FAILURE, "Cannot use given DMA device\n");
+               }
+       }
 
        /* Register vhost user driver to handle vhost messages. */
        for (i = 0; i < nb_sockets; i++) {
                char *file = socket_files + i * PATH_MAX;
+
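+               /* Sockets with a DMA device bound are registered with async copy enabled. */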
+               if (dma_count && get_async_flag_by_socketid(i) != 0)
+                       flags |= RTE_VHOST_USER_ASYNC_COPY;
+
                ret = rte_vhost_driver_register(file, flags);
                if (ret != 0) {
                        unregister_drivers(i);
@@ -1564,9 +2047,11 @@ main(int argc, char *argv[])
                }
        }
 
-       RTE_LCORE_FOREACH_SLAVE(lcore_id)
+       RTE_LCORE_FOREACH_WORKER(lcore_id)
                rte_eal_wait_lcore(lcore_id);
 
-       return 0;
+       /* clean up the EAL */
+       rte_eal_cleanup();
 
+       return 0;
 }