net: add rte prefix to ether structures
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 0709859..cd1a51b 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
  */
 
 #include <arpa/inet.h>
 #include <rte_log.h>
 #include <rte_string_fns.h>
 #include <rte_malloc.h>
-#include <rte_virtio_net.h>
+#include <rte_vhost.h>
 #include <rte_ip.h>
 #include <rte_tcp.h>
+#include <rte_pause.h>
 
 #include "main.h"
 
@@ -65,7 +37,6 @@
 #define MBUF_CACHE_SIZE        128
 #define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
 
-#define MAX_PKT_BURST 32               /* Max burst size for RX/TX */
 #define BURST_TX_DRAIN_US 100  /* TX drain every ~100us */
 
 #define BURST_RX_WAIT_US 15    /* Defines how long we wait between retries on RX */
 
 #define INVALID_PORT_ID 0xFF
 
-/* Max number of devices. Limited by vmdq. */
-#define MAX_DEVICES 64
-
-/* Size of buffers used for snprintfs. */
-#define MAX_PRINT_BUFF 6072
-
 /* Maximum long option length for option parsing. */
 #define MAX_LONG_OPT_SZ 64
 
@@ -129,6 +94,8 @@ static uint32_t enable_tso;
 static int client_mode;
 static int dequeue_zero_copy;
 
+static int builtin_net_driver;
+
 /* Specify timeout (in microseconds) between retries on RX. */
 static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
 /* Specify the number of retries on RX. */
@@ -143,21 +110,21 @@ static struct rte_eth_conf vmdq_conf_default = {
        .rxmode = {
                .mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
                .split_hdr_size = 0,
-               .header_split   = 0, /**< Header Split disabled */
-               .hw_ip_checksum = 0, /**< IP checksum offload disabled */
-               .hw_vlan_filter = 0, /**< VLAN filtering disabled */
                /*
-                * It is necessary for 1G NIC such as I350,
+                * VLAN strip is necessary for 1G NIC such as I350,
                 * this fixes a bug where IPv4 forwarding in the guest
                 * can't forward packets from one virtio dev to another.
                 */
-               .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
-               .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
-               .hw_strip_crc   = 0, /**< CRC stripped by hardware */
+               .offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
        },
 
        .txmode = {
                .mq_mode = ETH_MQ_TX_NONE,
+               .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
+                            DEV_TX_OFFLOAD_TCP_CKSUM |
+                            DEV_TX_OFFLOAD_VLAN_INSERT |
+                            DEV_TX_OFFLOAD_MULTI_SEGS |
+                            DEV_TX_OFFLOAD_TCP_TSO),
        },
        .rx_adv_conf = {
                /*
@@ -174,8 +141,9 @@ static struct rte_eth_conf vmdq_conf_default = {
        },
 };
 
+
 static unsigned lcore_ids[RTE_MAX_LCORE];
-static uint8_t ports[RTE_MAX_ETHPORTS];
+static uint16_t ports[RTE_MAX_ETHPORTS];
 static unsigned num_ports = 0; /**< The number of ports specified on the command line */
 static uint16_t num_pf_queues, num_vmdq_queues;
 static uint16_t vmdq_pool_base, vmdq_queue_base;
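
The rxmode/txmode rework above replaces the old per-field offload flags (header_split, hw_ip_checksum, hw_vlan_filter, hw_vlan_strip, jumbo_frame, hw_strip_crc) with the DEV_RX_OFFLOAD_* / DEV_TX_OFFLOAD_* bitmasks. A minimal sketch of the capability guard applications typically pair with this API — the same pattern this patch applies later for DEV_TX_OFFLOAD_MBUF_FAST_FREE; the helper name and the single-queue configure call are illustrative only, not part of the patch:

static int
configure_with_offload_check(uint16_t port, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port, &dev_info);

	/* Request VLAN strip only when the PMD reports it. */
	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP)
		conf->rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;

	/* Drop any requested Tx offload the device cannot do. */
	conf->txmode.offloads &= dev_info.tx_offload_capa;

	/* Single Rx/Tx queue purely for illustration. */
	return rte_eth_dev_configure(port, 1, 1, conf);
}
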
@@ -193,7 +161,7 @@ const uint16_t vlan_tags[] = {
 };
 
 /* ethernet addresses of ports */
-static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
+static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
 
 static struct vhost_dev_tailq_list vhost_dev_list =
        TAILQ_HEAD_INITIALIZER(vhost_dev_list);
@@ -243,27 +211,12 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
        return 0;
 }
 
-/*
- * Validate the device number according to the max pool number gotten form
- * dev_info. If the device number is invalid, give the error message and
- * return -1. Each device must have its own pool.
- */
-static inline int
-validate_num_devices(uint32_t max_nb_devices)
-{
-       if (num_devices > max_nb_devices) {
-               RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
-               return -1;
-       }
-       return 0;
-}
-
 /*
  * Initialises a given port using global settings and with the rx buffers
  * coming from the mbuf_pool passed as a parameter.
  */
 static inline int
-port_init(uint8_t port)
+port_init(uint16_t port)
 {
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf port_conf;
@@ -277,19 +230,10 @@ port_init(uint8_t port)
        /* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
        rte_eth_dev_info_get (port, &dev_info);
 
-       if (dev_info.max_rx_queues > MAX_QUEUES) {
-               rte_exit(EXIT_FAILURE,
-                       "please define MAX_QUEUES no less than %u in %s\n",
-                       dev_info.max_rx_queues, __FILE__);
-       }
-
        rxconf = &dev_info.default_rxconf;
        txconf = &dev_info.default_txconf;
        rxconf->rx_drop_en = 1;
 
-       /* Enable vlan offload */
-       txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;
-
 /* configure the number of supported virtio devices based on VMDQ limits */
        num_devices = dev_info.max_vmdq_pools;
 
@@ -308,10 +252,6 @@ port_init(uint8_t port)
 
        tx_rings = (uint16_t)rte_lcore_count();
 
-       retval = validate_num_devices(MAX_DEVICES);
-       if (retval < 0)
-               return retval;
-
        /* Get port configuration. */
        retval = get_eth_conf(&port_conf, num_devices);
        if (retval < 0)
@@ -326,19 +266,13 @@ port_init(uint8_t port)
        printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
                num_pf_queues, num_devices, queues_per_pool);
 
-       if (port >= rte_eth_dev_count()) return -1;
-
-       if (enable_tx_csum == 0)
-               rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_CSUM);
-
-       if (enable_tso == 0) {
-               rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4);
-               rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO6);
-               rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_GUEST_TSO4);
-               rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_GUEST_TSO6);
-       }
+       if (!rte_eth_dev_is_valid_port(port))
+               return -1;
 
        rx_rings = (uint16_t)dev_info.max_rx_queues;
+       if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+               port_conf.txmode.offloads |=
+                       DEV_TX_OFFLOAD_MBUF_FAST_FREE;
        /* Configure ethernet device. */
        retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
        if (retval != 0) {
@@ -347,7 +281,21 @@ port_init(uint8_t port)
                return retval;
        }
 
+       retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
+               &tx_ring_size);
+       if (retval != 0) {
+               RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
+                       "for port %u: %s.\n", port, strerror(-retval));
+               return retval;
+       }
+       if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
+               RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
+                       "for Rx queues on port %u.\n", port);
+               return -1;
+       }
+
        /* Setup the queues. */
+       rxconf->offloads = port_conf.rxmode.offloads;
        for (q = 0; q < rx_rings; q ++) {
                retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
                                                rte_eth_dev_socket_id(port),
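
A note on the descriptor adjustment added above: rte_eth_dev_adjust_nb_rx_tx_desc() may round the requested ring sizes up or down to the PMD's limits and alignment requirements. Since the mbuf pool is sized against RTE_TEST_RX_DESC_DEFAULT, a PMD that forces a larger Rx ring would silently starve the pool, hence the explicit re-check of the adjusted rx_ring_size before the queues are set up.
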
@@ -360,6 +308,7 @@ port_init(uint8_t port)
                        return retval;
                }
        }
+       txconf->offloads = port_conf.txmode.offloads;
        for (q = 0; q < tx_rings; q ++) {
                retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
                                                rte_eth_dev_socket_id(port),
@@ -387,7 +336,7 @@ port_init(uint8_t port)
        RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
        RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
                        " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
-                       (unsigned)port,
+                       port,
                        vmdq_ports_eth_addr[port].addr_bytes[0],
                        vmdq_ports_eth_addr[port].addr_bytes[1],
                        vmdq_ports_eth_addr[port].addr_bytes[2],
@@ -404,12 +353,20 @@ port_init(uint8_t port)
 static int
 us_vhost_parse_socket_path(const char *q_arg)
 {
+       char *old;
+
        /* parse number string */
-       if (strnlen(q_arg, PATH_MAX) > PATH_MAX)
+       if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
                return -1;
 
+       old = socket_files;
        socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
-       snprintf(socket_files + nb_sockets * PATH_MAX, PATH_MAX, "%s", q_arg);
+       if (socket_files == NULL) {
+               free(old);
+               return -1;
+       }
+
+       strlcpy(socket_files + nb_sockets * PATH_MAX, q_arg, PATH_MAX);
        nb_sockets++;
 
        return 0;
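
Two fixes ride along in this hunk. First, strnlen(q_arg, PATH_MAX) can never return more than PATH_MAX, so the old '>' test was dead code; '==' now correctly rejects a path with no NUL inside the buffer. Second, realloc()'s result is no longer assigned over the only copy of the pointer, which leaked the old block on failure. The general shape of that idiom, as a standalone sketch with hypothetical buf/new_size:

	char *tmp = realloc(buf, new_size);
	if (tmp == NULL) {
		free(buf);	/* old block is still valid; release it */
		return -1;
	}
	buf = tmp;		/* overwrite the pointer only on success */
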
@@ -509,6 +466,7 @@ us_vhost_parse_args(int argc, char **argv)
                {"tso", required_argument, NULL, 0},
                {"client", no_argument, &client_mode, 1},
                {"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
+               {"builtin-net-driver", no_argument, &builtin_net_driver, 1},
                {NULL, 0, 0, 0},
        };
 
@@ -531,7 +489,6 @@ us_vhost_parse_args(int argc, char **argv)
                        vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
                                ETH_VMDQ_ACCEPT_BROADCAST |
                                ETH_VMDQ_ACCEPT_MULTICAST;
-                       rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX);
 
                        break;
 
@@ -619,7 +576,8 @@ us_vhost_parse_args(int argc, char **argv)
                                } else {
                                        mergeable = !!ret;
                                        if (ret) {
-                                               vmdq_conf_default.rxmode.jumbo_frame = 1;
+                                               vmdq_conf_default.rxmode.offloads |=
+                                                       DEV_RX_OFFLOAD_JUMBO_FRAME;
                                                vmdq_conf_default.rxmode.max_rx_pkt_len
                                                        = JUMBO_FRAME_MAX_SIZE;
                                        }
@@ -662,7 +620,7 @@ us_vhost_parse_args(int argc, char **argv)
 
        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (enabled_port_mask & (1 << i))
-                       ports[num_ports++] = (uint8_t)i;
+                       ports[num_ports++] = i;
        }
 
        if ((num_ports ==  0) || (num_ports > MAX_SUP_PORTS)) {
@@ -690,9 +648,10 @@ static unsigned check_ports_num(unsigned nb_ports)
        }
 
        for (portid = 0; portid < num_ports; portid ++) {
-               if (ports[portid] >= nb_ports) {
-                       RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
-                               ports[portid], (nb_ports - 1));
+               if (!rte_eth_dev_is_valid_port(ports[portid])) {
+                       RTE_LOG(INFO, VHOST_PORT,
+                               "\nSpecified port ID(%u) is not valid\n",
+                               ports[portid]);
                        ports[portid] = INVALID_PORT_ID;
                        valid_num_ports--;
                }
@@ -700,8 +659,8 @@ static unsigned check_ports_num(unsigned nb_ports)
        return valid_num_ports;
 }
 
-static inline struct vhost_dev *__attribute__((always_inline))
-find_vhost_dev(struct ether_addr *mac)
+static __rte_always_inline struct vhost_dev *
+find_vhost_dev(struct rte_ether_addr *mac)
 {
        struct vhost_dev *vdev;
 
@@ -721,11 +680,11 @@ find_vhost_dev(struct ether_addr *mac)
 static int
 link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
 {
-       struct ether_hdr *pkt_hdr;
+       struct rte_ether_hdr *pkt_hdr;
        int i, ret;
 
        /* Learn MAC address of guest device from packet */
-       pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+       pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
        if (find_vhost_dev(&pkt_hdr->s_addr)) {
                RTE_LOG(ERR, VHOST_DATA,
@@ -800,13 +759,18 @@ unlink_vmdq(struct vhost_dev *vdev)
        }
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
            struct rte_mbuf *m)
 {
        uint16_t ret;
 
-       ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
+       if (builtin_net_driver) {
+               ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
+       } else {
+               ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
+       }
+
        if (enable_stats) {
                rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
                rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
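
The builtin_net_driver branch introduced here (and in the Rx/Tx drain paths below) selects between the vhost library datapath (rte_vhost_enqueue_burst) and the example's own minimal virtio-net implementation — vs_enqueue_pkts()/vs_dequeue_pkts(), shipped alongside this file in virtio_net.c — driven by the new --builtin-net-driver option added to the parser above.
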
@@ -819,30 +783,30 @@ virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
  * Check if the packet destination MAC address is for a local device. If so then put
  * the packet on that device's RX queue. If not then return.
  */
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
 virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
 {
-       struct ether_hdr *pkt_hdr;
+       struct rte_ether_hdr *pkt_hdr;
        struct vhost_dev *dst_vdev;
 
-       pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+       pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
        dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
        if (!dst_vdev)
                return -1;
 
        if (vdev->vid == dst_vdev->vid) {
-               RTE_LOG(DEBUG, VHOST_DATA,
+               RTE_LOG_DP(DEBUG, VHOST_DATA,
                        "(%d) TX: src and dst MAC is same. Dropping packet.\n",
                        vdev->vid);
                return 0;
        }
 
-       RTE_LOG(DEBUG, VHOST_DATA,
+       RTE_LOG_DP(DEBUG, VHOST_DATA,
                "(%d) TX: MAC address is local\n", dst_vdev->vid);
 
        if (unlikely(dst_vdev->remove)) {
-               RTE_LOG(DEBUG, VHOST_DATA,
+               RTE_LOG_DP(DEBUG, VHOST_DATA,
                        "(%d) device is marked for removal\n", dst_vdev->vid);
                return 0;
        }
@@ -855,19 +819,20 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
  * Check if the destination MAC of a packet is one local VM,
  * and get its vlan tag, and offset if it is.
  */
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
 find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
        uint32_t *offset, uint16_t *vlan_tag)
 {
        struct vhost_dev *dst_vdev;
-       struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+       struct rte_ether_hdr *pkt_hdr =
+               rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
        dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
        if (!dst_vdev)
                return 0;
 
        if (vdev->vid == dst_vdev->vid) {
-               RTE_LOG(DEBUG, VHOST_DATA,
+               RTE_LOG_DP(DEBUG, VHOST_DATA,
                        "(%d) TX: src and dst MAC is same. Dropping packet.\n",
                        vdev->vid);
                return -1;
@@ -881,7 +846,7 @@ find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
        *offset  = VLAN_HLEN;
        *vlan_tag = vlan_tags[vdev->vid];
 
-       RTE_LOG(DEBUG, VHOST_DATA,
+       RTE_LOG_DP(DEBUG, VHOST_DATA,
                "(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
                vdev->vid, dst_vdev->vid, *vlan_tag);
 
@@ -902,7 +867,8 @@ static void virtio_tx_offload(struct rte_mbuf *m)
        void *l3_hdr;
        struct ipv4_hdr *ipv4_hdr = NULL;
        struct tcp_hdr *tcp_hdr = NULL;
-       struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+       struct rte_ether_hdr *eth_hdr =
+               rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
        l3_hdr = (char *)eth_hdr + m->l2_len;
 
@@ -923,7 +889,7 @@ free_pkts(struct rte_mbuf **pkts, uint16_t n)
                rte_pktmbuf_free(pkts[n]);
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 do_drain_mbuf_table(struct mbuf_table *tx_q)
 {
        uint16_t count;
@@ -940,21 +906,22 @@ do_drain_mbuf_table(struct mbuf_table *tx_q)
  * This function routes the TX packet to the correct interface. This
  * may be a local device or the physical port.
  */
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
 {
        struct mbuf_table *tx_q;
        unsigned offset = 0;
        const uint16_t lcore_id = rte_lcore_id();
-       struct ether_hdr *nh;
+       struct rte_ether_hdr *nh;
 
 
-       nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
+       nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
        if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
                struct vhost_dev *vdev2;
 
                TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
-                       virtio_xmit(vdev2, vdev, m);
+                       if (vdev2 != vdev)
+                               virtio_xmit(vdev2, vdev, m);
                }
                goto queue2nic;
        }
@@ -973,7 +940,7 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
                }
        }
 
-       RTE_LOG(DEBUG, VHOST_DATA,
+       RTE_LOG_DP(DEBUG, VHOST_DATA,
                "(%d) TX: MAC address is external\n", vdev->vid);
 
 queue2nic:
@@ -981,10 +948,10 @@ queue2nic:
        /* Add packet to the port tx queue */
        tx_q = &lcore_tx_queue[lcore_id];
 
-       nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
+       nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
        if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
                /* Guest has inserted the vlan tag. */
-               struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
+               struct rte_vlan_hdr *vh = (struct rte_vlan_hdr *) (nh + 1);
                uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
                if ((vm2vm_mode == VM2VM_HARDWARE) &&
                        (vh->vlan_tci != vlan_tag_be))
@@ -1028,7 +995,7 @@ queue2nic:
 }
 
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 drain_mbuf_table(struct mbuf_table *tx_q)
 {
        static uint64_t prev_tsc;
@@ -1041,14 +1008,14 @@ drain_mbuf_table(struct mbuf_table *tx_q)
        if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
                prev_tsc = cur_tsc;
 
-               RTE_LOG(DEBUG, VHOST_DATA,
+               RTE_LOG_DP(DEBUG, VHOST_DATA,
                        "TX queue drained after timeout with burst size %u\n",
                        tx_q->len);
                do_drain_mbuf_table(tx_q);
        }
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 drain_eth_rx(struct vhost_dev *vdev)
 {
        uint16_t rx_count, enqueue_count;
@@ -1077,8 +1044,13 @@ drain_eth_rx(struct vhost_dev *vdev)
                }
        }
 
-       enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
+       if (builtin_net_driver) {
+               enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
                                                pkts, rx_count);
+       } else {
+               enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
+                                               pkts, rx_count);
+       }
        if (enable_stats) {
                rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
                rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
@@ -1087,15 +1059,20 @@ drain_eth_rx(struct vhost_dev *vdev)
        free_pkts(pkts, rx_count);
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 drain_virtio_tx(struct vhost_dev *vdev)
 {
        struct rte_mbuf *pkts[MAX_PKT_BURST];
        uint16_t count;
        uint16_t i;
 
-       count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, mbuf_pool,
+       if (builtin_net_driver) {
+               count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
                                        pkts, MAX_PKT_BURST);
+       } else {
+               count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
+                                       mbuf_pool, pkts, MAX_PKT_BURST);
+       }
 
        /* setup VMDq for the first packet */
        if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
@@ -1198,6 +1175,9 @@ destroy_device(int vid)
                rte_pause();
        }
 
+       if (builtin_net_driver)
+               vs_vhost_net_remove(vdev);
+
        TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
                     lcore_vdev_entry);
        TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
@@ -1228,7 +1208,7 @@ destroy_device(int vid)
 
 /*
  * A new device is added to a data core. First the device is added to the main linked list
- * and the allocated to a specific data core.
+ * and then allocated to a specific data core.
  */
 static int
 new_device(int vid)
@@ -1246,6 +1226,9 @@ new_device(int vid)
        }
        vdev->vid = vid;
 
+       if (builtin_net_driver)
+               vs_vhost_net_setup(vdev);
+
        TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
        vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
 
@@ -1281,7 +1264,7 @@ new_device(int vid)
  * These callbacks allow devices to be added to the data core when configuration
  * has been fully completed.
  */
-static const struct virtio_net_device_ops virtio_net_device_ops =
+static const struct vhost_device_ops virtio_net_device_ops =
 {
        .new_device =  new_device,
        .destroy_device = destroy_device,
@@ -1291,8 +1274,8 @@ static const struct virtio_net_device_ops virtio_net_device_ops =
  * This is a thread that will wake up after a period to print stats if the user has
  * enabled them.
  */
-static void
-print_stats(void)
+static void *
+print_stats(__rte_unused void *arg)
 {
        struct vhost_dev *vdev;
        uint64_t tx_dropped, rx_dropped;
@@ -1331,6 +1314,8 @@ print_stats(void)
 
                printf("===================================================\n");
        }
+
+       return NULL;
 }
 
 static void
@@ -1393,7 +1378,7 @@ create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
                mtu = 64 * 1024;
 
        nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
-                       (mbuf_size - RTE_PKTMBUF_HEADROOM) * MAX_PKT_BURST;
+                       (mbuf_size - RTE_PKTMBUF_HEADROOM);
        nr_mbufs_per_core += nr_rx_desc;
        nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
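
The corrected estimate above drops a duplicated MAX_PKT_BURST factor. As a rough check, assuming mbuf_size is RTE_MBUF_DEFAULT_BUF_SIZE (2048 bytes of data room plus the 128-byte RTE_PKTMBUF_HEADROOM, i.e. 2176) and the 64 KB TSO mtu set just above:

	(65536 + 2176) * 32 / 2048        = 1058 mbufs per core (fixed)
	(65536 + 2176) * 32 / 2048 * 32   = 33856 mbufs per core (old)

so the old formula overprovisioned the pool roughly 32x.
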
 
@@ -1417,9 +1402,8 @@ main(int argc, char *argv[])
        unsigned lcore_id, core_id = 0;
        unsigned nb_ports, valid_num_ports;
        int ret, i;
-       uint8_t portid;
+       uint16_t portid;
        static pthread_t tid;
-       char thread_name[RTE_MAX_THREAD_NAME_LEN];
        uint64_t flags = 0;
 
        signal(SIGINT, sigint_handler);
@@ -1436,17 +1420,18 @@ main(int argc, char *argv[])
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Invalid argument\n");
 
-       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++)
+       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
                TAILQ_INIT(&lcore_info[lcore_id].vdev_list);
 
                if (rte_lcore_is_enabled(lcore_id))
-                       lcore_ids[core_id ++] = lcore_id;
+                       lcore_ids[core_id++] = lcore_id;
+       }
 
        if (rte_lcore_count() > RTE_MAX_LCORE)
                rte_exit(EXIT_FAILURE,"Not enough cores\n");
 
        /* Get the number of physical ports. */
-       nb_ports = rte_eth_dev_count();
+       nb_ports = rte_eth_dev_count_avail();
 
        /*
         * Update the global var NUM_PORTS and global array PORTS
@@ -1477,7 +1462,7 @@ main(int argc, char *argv[])
        }
 
        /* initialize all ports */
-       for (portid = 0; portid < nb_ports; portid++) {
+       RTE_ETH_FOREACH_DEV(portid) {
                /* skip ports that are not enabled */
                if ((enabled_port_mask & (1 << portid)) == 0) {
                        RTE_LOG(INFO, VHOST_PORT,
@@ -1491,26 +1476,17 @@ main(int argc, char *argv[])
 
        /* Enable stats if the user option is set. */
        if (enable_stats) {
-               ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
-               if (ret != 0)
+               ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
+                                       print_stats, NULL);
+               if (ret < 0)
                        rte_exit(EXIT_FAILURE,
                                "Cannot create print-stats thread\n");
-
-               /* Set thread_name for aid in debugging.  */
-               snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
-               ret = rte_thread_setname(tid, thread_name);
-               if (ret != 0)
-                       RTE_LOG(DEBUG, VHOST_CONFIG,
-                               "Cannot set print-stats name\n");
        }
 
        /* Launch all data cores. */
        RTE_LCORE_FOREACH_SLAVE(lcore_id)
                rte_eal_remote_launch(switch_worker, NULL, lcore_id);
 
-       if (mergeable == 0)
-               rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
-
        if (client_mode)
                flags |= RTE_VHOST_USER_CLIENT;
 
@@ -1519,18 +1495,59 @@ main(int argc, char *argv[])
 
        /* Register vhost user driver to handle vhost messages. */
        for (i = 0; i < nb_sockets; i++) {
-               ret = rte_vhost_driver_register
-                               (socket_files + i * PATH_MAX, flags);
+               char *file = socket_files + i * PATH_MAX;
+               ret = rte_vhost_driver_register(file, flags);
                if (ret != 0) {
                        unregister_drivers(i);
                        rte_exit(EXIT_FAILURE,
                                "vhost driver register failure.\n");
                }
+
+               if (builtin_net_driver)
+                       rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);
+
+               if (mergeable == 0) {
+                       rte_vhost_driver_disable_features(file,
+                               1ULL << VIRTIO_NET_F_MRG_RXBUF);
+               }
+
+               if (enable_tx_csum == 0) {
+                       rte_vhost_driver_disable_features(file,
+                               1ULL << VIRTIO_NET_F_CSUM);
+               }
+
+               if (enable_tso == 0) {
+                       rte_vhost_driver_disable_features(file,
+                               1ULL << VIRTIO_NET_F_HOST_TSO4);
+                       rte_vhost_driver_disable_features(file,
+                               1ULL << VIRTIO_NET_F_HOST_TSO6);
+                       rte_vhost_driver_disable_features(file,
+                               1ULL << VIRTIO_NET_F_GUEST_TSO4);
+                       rte_vhost_driver_disable_features(file,
+                               1ULL << VIRTIO_NET_F_GUEST_TSO6);
+               }
+
+               if (promiscuous) {
+                       rte_vhost_driver_enable_features(file,
+                               1ULL << VIRTIO_NET_F_CTRL_RX);
+               }
+
+               ret = rte_vhost_driver_callback_register(file,
+                       &virtio_net_device_ops);
+               if (ret != 0) {
+                       rte_exit(EXIT_FAILURE,
+                               "failed to register vhost driver callbacks.\n");
+               }
+
+               if (rte_vhost_driver_start(file) < 0) {
+                       rte_exit(EXIT_FAILURE,
+                               "failed to start vhost driver.\n");
+               }
        }
 
-       rte_vhost_driver_callback_register(&virtio_net_device_ops);
+       RTE_LCORE_FOREACH_SLAVE(lcore_id)
+               rte_eal_wait_lcore(lcore_id);
 
-       rte_vhost_driver_session_start();
        return 0;
 
 }
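
Taken together, the final hunk completes the vhost API migration: the process-global rte_vhost_feature_enable()/rte_vhost_feature_disable() knobs and the blocking rte_vhost_driver_session_start() are gone, every socket is configured and started independently, and main() now simply waits on the worker lcores. Distilled to the minimal per-socket sequence (error handling elided, names as in the patch):

	rte_vhost_driver_register(file, flags);
	rte_vhost_driver_disable_features(file, 1ULL << VIRTIO_NET_F_MRG_RXBUF);
	rte_vhost_driver_callback_register(file, &virtio_net_device_ops);
	rte_vhost_driver_start(file);	/* non-blocking; vhost runs its own session thread */
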