diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 92eaffe84b..d7b34b3d41 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
  */
 
 #include <arpa/inet.h>
@@ -49,9 +20,10 @@
 #include <rte_log.h>
 #include <rte_string_fns.h>
 #include <rte_malloc.h>
-#include <rte_virtio_net.h>
+#include <rte_vhost.h>
 #include <rte_ip.h>
 #include <rte_tcp.h>
+#include <rte_pause.h>
 
 #include "main.h"
 
@@ -65,7 +37,6 @@
 #define MBUF_CACHE_SIZE	128
 #define MBUF_DATA_SIZE	RTE_MBUF_DEFAULT_BUF_SIZE
 
-#define MAX_PKT_BURST 32		/* Max burst size for RX/TX */
 #define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */
 
 #define BURST_RX_WAIT_US 15	/* Defines how long we wait between retries on RX */
@@ -84,15 +55,6 @@
 
 #define INVALID_PORT_ID 0xFF
 
-/* Max number of devices. Limited by vmdq. */
-#define MAX_DEVICES 64
-
-/* Size of buffers used for snprintfs. */
-#define MAX_PRINT_BUFF 6072
-
-/* Maximum character device basename size. */
-#define MAX_BASENAME_SZ 10
-
 /* Maximum long option length for option parsing. */
 #define MAX_LONG_OPT_SZ 64
 
@@ -109,9 +71,6 @@ static uint32_t num_devices;
 static struct rte_mempool *mbuf_pool;
 static int mergeable;
 
-/* Do vlan strip on host, enabled on default */
-static uint32_t vlan_strip = 1;
-
 /* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
 typedef enum {
 	VM2VM_DISABLED = 0,
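
The new globals introduced below (client_mode, dequeue_zero_copy and
builtin_net_driver) are set by command-line switches; the first two translate
directly into registration flags for the vhost-user socket, as the
registration loop at the end of this patch shows. A minimal sketch of that
mapping, assuming the post-17.05 rte_vhost API; vhost_flags() is an
illustrative helper, not part of the patch:

    #include <rte_vhost.h>

    static uint64_t
    vhost_flags(int client, int zero_copy)
    {
    	uint64_t flags = 0;

    	if (client)	/* --client: DPDK connect()s to an existing socket */
    		flags |= RTE_VHOST_USER_CLIENT;
    	if (zero_copy)	/* --dequeue-zero-copy: guest Tx buffers are mapped
    			 * rather than copied on dequeue */
    		flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;

    	return flags;
    }
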
@@ -132,34 +91,40 @@ static uint32_t enable_tx_csum;
 /* Disable TSO offload */
 static uint32_t enable_tso;
 
+static int client_mode;
+static int dequeue_zero_copy;
+
+static int builtin_net_driver;
+
 /* Specify timeout (in microseconds) between retries on RX. */
 static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
 /* Specify the number of retries on RX. */
 static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
 
-/* Character device basename. Can be set by user. */
-static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";
+/* Socket file paths. Can be set by user */
+static char *socket_files;
+static int nb_sockets;
 
 /* empty vmdq configuration structure. Filled in programmatically */
 static struct rte_eth_conf vmdq_conf_default = {
 	.rxmode = {
 		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
 		.split_hdr_size = 0,
-		.header_split   = 0, /**< Header Split disabled */
-		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
-		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
 		/*
-		 * It is necessary for 1G NIC such as I350,
+		 * VLAN strip is necessary for 1G NIC such as I350,
		 * this fixes a bug where ipv4 forwarding in guest can't
		 * forward packets from one virtio dev to another virtio dev.
 		 */
-		.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
-		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
-		.hw_strip_crc   = 0, /**< CRC stripped by hardware */
+		.offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
 	},
 
 	.txmode = {
 		.mq_mode = ETH_MQ_TX_NONE,
+		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
+			     DEV_TX_OFFLOAD_TCP_CKSUM |
+			     DEV_TX_OFFLOAD_VLAN_INSERT |
+			     DEV_TX_OFFLOAD_MULTI_SEGS |
+			     DEV_TX_OFFLOAD_TCP_TSO),
 	},
 	.rx_adv_conf = {
 		/*
@@ -176,8 +141,9 @@ static struct rte_eth_conf vmdq_conf_default = {
 	},
 };
 
+
 static unsigned lcore_ids[RTE_MAX_LCORE];
-static uint8_t ports[RTE_MAX_ETHPORTS];
+static uint16_t ports[RTE_MAX_ETHPORTS];
 static unsigned num_ports = 0; /**< The number of ports specified in command line */
 static uint16_t num_pf_queues, num_vmdq_queues;
 static uint16_t vmdq_pool_base, vmdq_queue_base;
@@ -195,7 +161,7 @@ const uint16_t vlan_tags[] = {
 };
 
 /* ethernet addresses of ports */
-static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
+static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
 
 static struct vhost_dev_tailq_list vhost_dev_list =
 	TAILQ_HEAD_INITIALIZER(vhost_dev_list);
@@ -245,27 +211,12 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
 	return 0;
 }
 
-/*
- * Validate the device number according to the max pool number gotten form
- * dev_info. If the device number is invalid, give the error message and
- * return -1. Each device must have its own pool.
- */ -static inline int -validate_num_devices(uint32_t max_nb_devices) -{ - if (num_devices > max_nb_devices) { - RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n"); - return -1; - } - return 0; -} - /* * Initialises a given port using global settings and with the rx buffers * coming from the mbuf_pool passed as parameter */ static inline int -port_init(uint8_t port) +port_init(uint16_t port) { struct rte_eth_dev_info dev_info; struct rte_eth_conf port_conf; @@ -279,29 +230,27 @@ port_init(uint8_t port) /* The max pool number from dev_info will be used to validate the pool number specified in cmd line */ rte_eth_dev_info_get (port, &dev_info); - if (dev_info.max_rx_queues > MAX_QUEUES) { - rte_exit(EXIT_FAILURE, - "please define MAX_QUEUES no less than %u in %s\n", - dev_info.max_rx_queues, __FILE__); - } - rxconf = &dev_info.default_rxconf; txconf = &dev_info.default_txconf; rxconf->rx_drop_en = 1; - /* Enable vlan offload */ - txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL; - /*configure the number of supported virtio devices based on VMDQ limits */ num_devices = dev_info.max_vmdq_pools; rx_ring_size = RTE_TEST_RX_DESC_DEFAULT; tx_ring_size = RTE_TEST_TX_DESC_DEFAULT; - tx_rings = (uint16_t)rte_lcore_count(); - retval = validate_num_devices(MAX_DEVICES); - if (retval < 0) - return retval; + /* + * When dequeue zero copy is enabled, guest Tx used vring will be + * updated only when corresponding mbuf is freed. Thus, the nb_tx_desc + * (tx_ring_size here) must be small enough so that the driver will + * hit the free threshold easily and free mbufs timely. Otherwise, + * guest Tx vring would be starved. + */ + if (dequeue_zero_copy) + tx_ring_size = 64; + + tx_rings = (uint16_t)rte_lcore_count(); /* Get port configuration. */ retval = get_eth_conf(&port_conf, num_devices); @@ -317,43 +266,66 @@ port_init(uint8_t port) printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n", num_pf_queues, num_devices, queues_per_pool); - if (port >= rte_eth_dev_count()) return -1; - - if (enable_tx_csum == 0) - rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_CSUM); - - if (enable_tso == 0) { - rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4); - rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO6); - } + if (!rte_eth_dev_is_valid_port(port)) + return -1; rx_rings = (uint16_t)dev_info.max_rx_queues; + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE) + port_conf.txmode.offloads |= + DEV_TX_OFFLOAD_MBUF_FAST_FREE; /* Configure ethernet device. */ retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf); - if (retval != 0) + if (retval != 0) { + RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n", + port, strerror(-retval)); + return retval; + } + + retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size, + &tx_ring_size); + if (retval != 0) { + RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors " + "for port %u: %s.\n", port, strerror(-retval)); return retval; + } + if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) { + RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size " + "for Rx queues on port %u.\n", port); + return -1; + } /* Setup the queues. 
*/
+	rxconf->offloads = port_conf.rxmode.offloads;
 	for (q = 0; q < rx_rings; q ++) {
 		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
 						rte_eth_dev_socket_id(port),
 						rxconf,
 						mbuf_pool);
-		if (retval < 0)
+		if (retval < 0) {
+			RTE_LOG(ERR, VHOST_PORT,
+				"Failed to setup rx queue %u of port %u: %s.\n",
+				q, port, strerror(-retval));
 			return retval;
+		}
 	}
+	txconf->offloads = port_conf.txmode.offloads;
 	for (q = 0; q < tx_rings; q ++) {
 		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
 						rte_eth_dev_socket_id(port),
 						txconf);
-		if (retval < 0)
+		if (retval < 0) {
+			RTE_LOG(ERR, VHOST_PORT,
+				"Failed to setup tx queue %u of port %u: %s.\n",
+				q, port, strerror(-retval));
 			return retval;
+		}
 	}
 
 	/* Start the device. */
 	retval = rte_eth_dev_start(port);
 	if (retval < 0) {
-		RTE_LOG(ERR, VHOST_DATA, "Failed to start the device.\n");
+		RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
+			port, strerror(-retval));
 		return retval;
 	}
 
@@ -364,7 +336,7 @@ port_init(uint8_t port)
 	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
 	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
 			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
-			(unsigned)port,
+			port,
 			vmdq_ports_eth_addr[port].addr_bytes[0],
 			vmdq_ports_eth_addr[port].addr_bytes[1],
 			vmdq_ports_eth_addr[port].addr_bytes[2],
@@ -376,17 +348,26 @@ port_init(uint8_t port)
 }
 
 /*
- * Set character device basename.
+ * Set socket file path.
  */
 static int
-us_vhost_parse_basename(const char *q_arg)
+us_vhost_parse_socket_path(const char *q_arg)
 {
+	char *old;
+
 	/* parse the socket path string */
+	if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
+		return -1;
 
-	if (strnlen(q_arg, MAX_BASENAME_SZ) > MAX_BASENAME_SZ)
+	old = socket_files;
+	socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
+	if (socket_files == NULL) {
+		free(old);
 		return -1;
-	else
-		snprintf((char*)&dev_basename, MAX_BASENAME_SZ, "%s", q_arg);
+	}
+
+	strlcpy(socket_files + nb_sockets * PATH_MAX, q_arg, PATH_MAX);
 
+	nb_sockets++;
 	return 0;
 }
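
The parser above grows one flat buffer, socket_files, by PATH_MAX bytes per
--socket-file argument, so entry i always starts at socket_files + i * PATH_MAX
(the registration and unregister loops later in the patch index it the same
way). A standalone sketch of that storage pattern, using hypothetical names
(paths, nb_paths, path_at) and plain libc strncpy in place of DPDK's strlcpy:

    #include <limits.h>	/* PATH_MAX */
    #include <stdlib.h>
    #include <string.h>

    static char *paths;	/* nb_paths entries of PATH_MAX bytes each */
    static int nb_paths;

    static int
    path_append(const char *p)
    {
    	char *old = paths;

    	if (strnlen(p, PATH_MAX) == PATH_MAX)
    		return -1;	/* no NUL terminator within PATH_MAX */

    	paths = realloc(paths, (size_t)PATH_MAX * (nb_paths + 1));
    	if (paths == NULL) {
    		free(old);	/* realloc failed; old block is still ours */
    		return -1;
    	}

    	strncpy(paths + (size_t)nb_paths * PATH_MAX, p, PATH_MAX);
    	nb_paths++;
    	return 0;
    }

    static const char *
    path_at(int i)
    {
    	return paths + (size_t)i * PATH_MAX;	/* entry i */
    }
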
@@ -446,7 +427,7 @@ us_vhost_usage(const char *prgname)
 	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
 	"		--vm2vm [0|1|2]\n"
 	"		--rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
-	"		--dev-basename <name>\n"
+	"		--socket-file <path>\n"
 	"		--nb-devices ND\n"
 	"		-p PORTMASK: Set mask for ports to be used by application\n"
 	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
@@ -454,11 +435,12 @@ us_vhost_usage(const char *prgname)
 	"		--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. This takes effect only if retries on RX are enabled\n"
 	"		--rx-retry-num [0-N]: the number of retries on RX. This takes effect only if retries on RX are enabled\n"
 	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
-	"		--vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n"
 	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
-	"		--dev-basename: The basename to be used for the character device.\n"
+	"		--socket-file: The path of the socket file.\n"
 	"		--tx-csum [0|1] disable/enable TX checksum offload.\n"
-	"		--tso [0|1] disable/enable TCP segment offload.\n",
+	"		--tso [0|1] disable/enable TCP segment offload.\n"
+	"		--client register a vhost-user socket as client mode.\n"
+	"		--dequeue-zero-copy enables dequeue zero copy\n",
 	       prgname);
 }
 
@@ -478,11 +460,13 @@ us_vhost_parse_args(int argc, char **argv)
 		{"rx-retry-delay", required_argument, NULL, 0},
 		{"rx-retry-num", required_argument, NULL, 0},
 		{"mergeable", required_argument, NULL, 0},
-		{"vlan-strip", required_argument, NULL, 0},
 		{"stats", required_argument, NULL, 0},
-		{"dev-basename", required_argument, NULL, 0},
+		{"socket-file", required_argument, NULL, 0},
 		{"tx-csum", required_argument, NULL, 0},
 		{"tso", required_argument, NULL, 0},
+		{"client", no_argument, &client_mode, 1},
+		{"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
+		{"builtin-net-driver", no_argument, &builtin_net_driver, 1},
 		{NULL, 0, 0, 0},
 	};
 
@@ -505,7 +489,6 @@ us_vhost_parse_args(int argc, char **argv)
 			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
 				ETH_VMDQ_ACCEPT_BROADCAST |
 				ETH_VMDQ_ACCEPT_MULTICAST;
-			rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX);
 
 			break;
 
@@ -593,34 +576,20 @@ us_vhost_parse_args(int argc, char **argv)
 			} else {
 				mergeable = !!ret;
 				if (ret) {
-					vmdq_conf_default.rxmode.jumbo_frame = 1;
+					vmdq_conf_default.rxmode.offloads |=
+						DEV_RX_OFFLOAD_JUMBO_FRAME;
 					vmdq_conf_default.rxmode.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE;
 				}
 			}
 		}
 
-		/* Enable/disable RX VLAN strip on host. */
-		if (!strncmp(long_option[option_index].name,
-			"vlan-strip", MAX_LONG_OPT_SZ)) {
-			ret = parse_num_opt(optarg, 1);
-			if (ret == -1) {
-				RTE_LOG(INFO, VHOST_CONFIG,
-					"Invalid argument for VLAN strip [0|1]\n");
-				us_vhost_usage(prgname);
-				return -1;
-			} else {
-				vlan_strip = !!ret;
-				vmdq_conf_default.rxmode.hw_vlan_strip =
-					vlan_strip;
-			}
-		}
-
 		/* Enable/disable stats. */
 		if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
 			ret = parse_num_opt(optarg, INT32_MAX);
 			if (ret == -1) {
-				RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
+				RTE_LOG(INFO, VHOST_CONFIG,
+					"Invalid argument for stats [0..N]\n");
 				us_vhost_usage(prgname);
 				return -1;
 			} else {
@@ -628,10 +597,13 @@ us_vhost_parse_args(int argc, char **argv)
 			}
 		}
 
-		/* Set character device basename. */
-		if (!strncmp(long_option[option_index].name, "dev-basename", MAX_LONG_OPT_SZ)) {
-			if (us_vhost_parse_basename(optarg) == -1) {
-				RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for character device basename (Max %d characters)\n", MAX_BASENAME_SZ);
+			/* Set socket file path. 
*/ + if (!strncmp(long_option[option_index].name, + "socket-file", MAX_LONG_OPT_SZ)) { + if (us_vhost_parse_socket_path(optarg) == -1) { + RTE_LOG(INFO, VHOST_CONFIG, + "Invalid argument for socket name (Max %d characters)\n", + PATH_MAX); us_vhost_usage(prgname); return -1; } @@ -648,7 +620,7 @@ us_vhost_parse_args(int argc, char **argv) for (i = 0; i < RTE_MAX_ETHPORTS; i++) { if (enabled_port_mask & (1 << i)) - ports[num_ports++] = (uint8_t)i; + ports[num_ports++] = i; } if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) { @@ -676,9 +648,10 @@ static unsigned check_ports_num(unsigned nb_ports) } for (portid = 0; portid < num_ports; portid ++) { - if (ports[portid] >= nb_ports) { - RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n", - ports[portid], (nb_ports - 1)); + if (!rte_eth_dev_is_valid_port(ports[portid])) { + RTE_LOG(INFO, VHOST_PORT, + "\nSpecified port ID(%u) is not valid\n", + ports[portid]); ports[portid] = INVALID_PORT_ID; valid_num_ports--; } @@ -686,14 +659,14 @@ static unsigned check_ports_num(unsigned nb_ports) return valid_num_ports; } -static inline struct vhost_dev *__attribute__((always_inline)) -find_vhost_dev(struct ether_addr *mac) +static __rte_always_inline struct vhost_dev * +find_vhost_dev(struct rte_ether_addr *mac) { struct vhost_dev *vdev; TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) { if (vdev->ready == DEVICE_RX && - is_same_ether_addr(mac, &vdev->mac_address)) + rte_is_same_ether_addr(mac, &vdev->mac_address)) return vdev; } @@ -707,29 +680,29 @@ find_vhost_dev(struct ether_addr *mac) static int link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m) { - struct ether_hdr *pkt_hdr; + struct rte_ether_hdr *pkt_hdr; int i, ret; /* Learn MAC address of guest device from packet */ - pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *); + pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); if (find_vhost_dev(&pkt_hdr->s_addr)) { RTE_LOG(ERR, VHOST_DATA, "(%d) device is using a registered MAC!\n", - vdev->device_fh); + vdev->vid); return -1; } - for (i = 0; i < ETHER_ADDR_LEN; i++) + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i]; /* vlan_tag currently uses the device_id. */ - vdev->vlan_tag = vlan_tags[vdev->device_fh]; + vdev->vlan_tag = vlan_tags[vdev->vid]; /* Print out VMDQ registration info. */ RTE_LOG(INFO, VHOST_DATA, "(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n", - vdev->device_fh, + vdev->vid, vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1], vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3], vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5], @@ -737,16 +710,13 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m) /* Register the MAC address. */ ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address, - (uint32_t)vdev->device_fh + vmdq_pool_base); + (uint32_t)vdev->vid + vmdq_pool_base); if (ret) RTE_LOG(ERR, VHOST_DATA, "(%d) failed to add device MAC address to VMDQ\n", - vdev->device_fh); + vdev->vid); - /* Enable stripping of the vlan tag as we handle routing. */ - if (vlan_strip) - rte_eth_dev_set_vlan_strip_on_queue(ports[0], - (uint16_t)vdev->vmdq_rx_q, 1); + rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1); /* Set device as ready for RX. 
*/ vdev->ready = DEVICE_RX; @@ -789,13 +759,18 @@ unlink_vmdq(struct vhost_dev *vdev) } } -static inline void __attribute__((always_inline)) +static __rte_always_inline void virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev, struct rte_mbuf *m) { uint16_t ret; - ret = rte_vhost_enqueue_burst(dst_vdev->dev, VIRTIO_RXQ, &m, 1); + if (builtin_net_driver) { + ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1); + } else { + ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1); + } + if (enable_stats) { rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic); rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret); @@ -808,31 +783,31 @@ virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev, * Check if the packet destination MAC address is for a local device. If so then put * the packet on that devices RX queue. If not then return. */ -static inline int __attribute__((always_inline)) +static __rte_always_inline int virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m) { - struct ether_hdr *pkt_hdr; + struct rte_ether_hdr *pkt_hdr; struct vhost_dev *dst_vdev; - pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *); + pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); dst_vdev = find_vhost_dev(&pkt_hdr->d_addr); if (!dst_vdev) return -1; - if (vdev->device_fh == dst_vdev->device_fh) { - RTE_LOG(DEBUG, VHOST_DATA, + if (vdev->vid == dst_vdev->vid) { + RTE_LOG_DP(DEBUG, VHOST_DATA, "(%d) TX: src and dst MAC is same. Dropping packet.\n", - vdev->device_fh); + vdev->vid); return 0; } - RTE_LOG(DEBUG, VHOST_DATA, - "(%d) TX: MAC address is local\n", dst_vdev->device_fh); + RTE_LOG_DP(DEBUG, VHOST_DATA, + "(%d) TX: MAC address is local\n", dst_vdev->vid); if (unlikely(dst_vdev->remove)) { - RTE_LOG(DEBUG, VHOST_DATA, - "(%d) device is marked for removal\n", dst_vdev->device_fh); + RTE_LOG_DP(DEBUG, VHOST_DATA, + "(%d) device is marked for removal\n", dst_vdev->vid); return 0; } @@ -844,21 +819,22 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m) * Check if the destination MAC of a packet is one local VM, * and get its vlan tag, and offset if it is. */ -static inline int __attribute__((always_inline)) +static __rte_always_inline int find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m, uint32_t *offset, uint16_t *vlan_tag) { struct vhost_dev *dst_vdev; - struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *); + struct rte_ether_hdr *pkt_hdr = + rte_pktmbuf_mtod(m, struct rte_ether_hdr *); dst_vdev = find_vhost_dev(&pkt_hdr->d_addr); if (!dst_vdev) return 0; - if (vdev->device_fh == dst_vdev->device_fh) { - RTE_LOG(DEBUG, VHOST_DATA, + if (vdev->vid == dst_vdev->vid) { + RTE_LOG_DP(DEBUG, VHOST_DATA, "(%d) TX: src and dst MAC is same. Dropping packet.\n", - vdev->device_fh); + vdev->vid); return -1; } @@ -868,11 +844,11 @@ find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m, * the packet length by plus it. 
*/ *offset = VLAN_HLEN; - *vlan_tag = vlan_tags[vdev->device_fh]; + *vlan_tag = vlan_tags[vdev->vid]; - RTE_LOG(DEBUG, VHOST_DATA, + RTE_LOG_DP(DEBUG, VHOST_DATA, "(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n", - vdev->device_fh, dst_vdev->device_fh, *vlan_tag); + vdev->vid, dst_vdev->vid, *vlan_tag); return 0; } @@ -882,7 +858,7 @@ get_psd_sum(void *l3_hdr, uint64_t ol_flags) { if (ol_flags & PKT_TX_IPV4) return rte_ipv4_phdr_cksum(l3_hdr, ol_flags); - else /* assume ethertype == ETHER_TYPE_IPv6 */ + else /* assume ethertype == RTE_ETHER_TYPE_IPv6 */ return rte_ipv6_phdr_cksum(l3_hdr, ol_flags); } @@ -891,7 +867,8 @@ static void virtio_tx_offload(struct rte_mbuf *m) void *l3_hdr; struct ipv4_hdr *ipv4_hdr = NULL; struct tcp_hdr *tcp_hdr = NULL; - struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *); + struct rte_ether_hdr *eth_hdr = + rte_pktmbuf_mtod(m, struct rte_ether_hdr *); l3_hdr = (char *)eth_hdr + m->l2_len; @@ -912,7 +889,7 @@ free_pkts(struct rte_mbuf **pkts, uint16_t n) rte_pktmbuf_free(pkts[n]); } -static inline void __attribute__((always_inline)) +static __rte_always_inline void do_drain_mbuf_table(struct mbuf_table *tx_q) { uint16_t count; @@ -929,21 +906,22 @@ do_drain_mbuf_table(struct mbuf_table *tx_q) * This function routes the TX packet to the correct interface. This * may be a local device or the physical port. */ -static inline void __attribute__((always_inline)) +static __rte_always_inline void virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag) { struct mbuf_table *tx_q; unsigned offset = 0; const uint16_t lcore_id = rte_lcore_id(); - struct ether_hdr *nh; + struct rte_ether_hdr *nh; - nh = rte_pktmbuf_mtod(m, struct ether_hdr *); - if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) { + nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); + if (unlikely(rte_is_broadcast_ether_addr(&nh->d_addr))) { struct vhost_dev *vdev2; TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) { - virtio_xmit(vdev2, vdev, m); + if (vdev2 != vdev) + virtio_xmit(vdev2, vdev, m); } goto queue2nic; } @@ -962,18 +940,18 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag) } } - RTE_LOG(DEBUG, VHOST_DATA, - "(%d) TX: MAC address is external\n", vdev->device_fh); + RTE_LOG_DP(DEBUG, VHOST_DATA, + "(%d) TX: MAC address is external\n", vdev->vid); queue2nic: /*Add packet to the port tx queue*/ tx_q = &lcore_tx_queue[lcore_id]; - nh = rte_pktmbuf_mtod(m, struct ether_hdr *); - if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) { + nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); + if (unlikely(nh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))) { /* Guest has inserted the vlan tag. 
*/ - struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1); + struct rte_vlan_hdr *vh = (struct rte_vlan_hdr *) (nh + 1); uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag); if ((vm2vm_mode == VM2VM_HARDWARE) && (vh->vlan_tci != vlan_tag_be)) @@ -1017,7 +995,7 @@ queue2nic: } -static inline void __attribute__((always_inline)) +static __rte_always_inline void drain_mbuf_table(struct mbuf_table *tx_q) { static uint64_t prev_tsc; @@ -1030,18 +1008,17 @@ drain_mbuf_table(struct mbuf_table *tx_q) if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) { prev_tsc = cur_tsc; - RTE_LOG(DEBUG, VHOST_DATA, + RTE_LOG_DP(DEBUG, VHOST_DATA, "TX queue drained after timeout with burst size %u\n", tx_q->len); do_drain_mbuf_table(tx_q); } } -static inline void __attribute__((always_inline)) +static __rte_always_inline void drain_eth_rx(struct vhost_dev *vdev) { uint16_t rx_count, enqueue_count; - struct virtio_net *dev = vdev->dev; struct rte_mbuf *pkts[MAX_PKT_BURST]; rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q, @@ -1055,20 +1032,25 @@ drain_eth_rx(struct vhost_dev *vdev) * to diminish packet loss. */ if (enable_retry && - unlikely(rx_count > rte_vring_available_entries(dev, + unlikely(rx_count > rte_vhost_avail_entries(vdev->vid, VIRTIO_RXQ))) { uint32_t retry; for (retry = 0; retry < burst_rx_retry_num; retry++) { rte_delay_us(burst_rx_delay_time); - if (rx_count <= rte_vring_available_entries(dev, + if (rx_count <= rte_vhost_avail_entries(vdev->vid, VIRTIO_RXQ)) break; } } - enqueue_count = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, + if (builtin_net_driver) { + enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ, pkts, rx_count); + } else { + enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ, + pkts, rx_count); + } if (enable_stats) { rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count); rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count); @@ -1077,15 +1059,20 @@ drain_eth_rx(struct vhost_dev *vdev) free_pkts(pkts, rx_count); } -static inline void __attribute__((always_inline)) +static __rte_always_inline void drain_virtio_tx(struct vhost_dev *vdev) { struct rte_mbuf *pkts[MAX_PKT_BURST]; uint16_t count; uint16_t i; - count = rte_vhost_dequeue_burst(vdev->dev, VIRTIO_TXQ, mbuf_pool, + if (builtin_net_driver) { + count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool, pkts, MAX_PKT_BURST); + } else { + count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, + mbuf_pool, pkts, MAX_PKT_BURST); + } /* setup VMDq for the first packet */ if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) { @@ -1094,7 +1081,7 @@ drain_virtio_tx(struct vhost_dev *vdev) } for (i = 0; i < count; ++i) - virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->device_fh]); + virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]); } /* @@ -1171,18 +1158,26 @@ switch_worker(void *arg __rte_unused) * of dev->remove=1 which can cause an infinite loop in the rte_pause loop. */ static void -destroy_device (volatile struct virtio_net *dev) +destroy_device(int vid) { - struct vhost_dev *vdev; + struct vhost_dev *vdev = NULL; int lcore; - vdev = (struct vhost_dev *)dev->priv; + TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) { + if (vdev->vid == vid) + break; + } + if (!vdev) + return; /*set the remove flag. 
*/
 	vdev->remove = 1;
 	while(vdev->ready != DEVICE_SAFE_REMOVE) {
 		rte_pause();
 	}
 
+	if (builtin_net_driver)
+		vs_vhost_net_remove(vdev);
+
 	TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
 		     lcore_vdev_entry);
 	TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
@@ -1206,36 +1201,36 @@ destroy_device (volatile struct virtio_net *dev)
 
 	RTE_LOG(INFO, VHOST_DATA,
 		"(%d) device has been removed from data core\n",
-		vdev->device_fh);
+		vdev->vid);
 
 	rte_free(vdev);
 }
 
 /*
  * A new device is added to a data core. First the device is added to the main linked list
- * and the allocated to a specific data core.
+ * and then allocated to a specific data core.
  */
 static int
-new_device (struct virtio_net *dev)
+new_device(int vid)
 {
 	int lcore, core_add = 0;
 	uint32_t device_num_min = num_devices;
 	struct vhost_dev *vdev;
-	int device_fh = dev->device_fh;
 
 	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
 	if (vdev == NULL) {
 		RTE_LOG(INFO, VHOST_DATA,
 			"(%d) couldn't allocate memory for vhost dev\n",
-			device_fh);
+			vid);
 		return -1;
 	}
-	vdev->dev = dev;
-	dev->priv = vdev;
-	vdev->device_fh = device_fh;
+	vdev->vid = vid;
+
+	if (builtin_net_driver)
+		vs_vhost_net_setup(vdev);
 
 	TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
-	vdev->vmdq_rx_q = device_fh * queues_per_pool + vmdq_queue_base;
+	vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
 
 	/*reset ready flag*/
 	vdev->ready = DEVICE_MAC_LEARNING;
@@ -1255,12 +1250,12 @@ new_device (struct virtio_net *dev)
 	lcore_info[vdev->coreid].device_num++;
 
 	/* Disable notifications. */
-	rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
-	rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);
+	rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
+	rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
 
 	RTE_LOG(INFO, VHOST_DATA,
 		"(%d) device has been added to data core %d\n",
-		device_fh, vdev->coreid);
+		vid, vdev->coreid);
 
 	return 0;
 }
@@ -1269,7 +1264,7 @@ new_device (struct virtio_net *dev)
 
 /*
  * These callbacks allow devices to be added to the data core when configuration
  * has been fully completed.
  */
-static const struct virtio_net_device_ops virtio_net_device_ops =
+static const struct vhost_device_ops virtio_net_device_ops =
 {
 	.new_device = new_device,
 	.destroy_device = destroy_device,
@@ -1279,8 +1274,8 @@ static const struct virtio_net_device_ops virtio_net_device_ops =
 * This is a thread that will wake up after a period to print stats if the user has
 * enabled them.
 */
-static void
-print_stats(void)
+static void *
+print_stats(__rte_unused void *arg)
 {
 	struct vhost_dev *vdev;
 	uint64_t tx_dropped, rx_dropped;
@@ -1312,13 +1307,29 @@
 			"RX total: %" PRIu64 "\n"
 			"RX dropped: %" PRIu64 "\n"
 			"RX successful: %" PRIu64 "\n",
-			vdev->dev->device_fh,
+			vdev->vid,
 			tx_total, tx_dropped, tx,
 			rx_total, rx_dropped, rx);
 	}
 
 	printf("===================================================\n");
+
+	return NULL;
+}
+
+static void
+unregister_drivers(int socket_num)
+{
+	int i, ret;
+
+	for (i = 0; i < socket_num; i++) {
+		ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
+		if (ret != 0)
+			RTE_LOG(ERR, VHOST_CONFIG,
+				"Fail to unregister vhost driver for %s.\n",
+				socket_files + i * PATH_MAX);
+	}
 }
 
 /* When we receive an INT signal, unregister the vhost driver */
@@ -1326,9 +1337,8 @@ static void
 sigint_handler(__rte_unused int signum)
 {
 	/* Unregister vhost driver. 
*/ - int ret = rte_vhost_driver_unregister((char *)&dev_basename); - if (ret != 0) - rte_exit(EXIT_FAILURE, "vhost driver unregister failure.\n"); + unregister_drivers(nb_sockets); + exit(0); } @@ -1368,7 +1378,7 @@ create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size, mtu = 64 * 1024; nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST / - (mbuf_size - RTE_PKTMBUF_HEADROOM) * MAX_PKT_BURST; + (mbuf_size - RTE_PKTMBUF_HEADROOM); nr_mbufs_per_core += nr_rx_desc; nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache); @@ -1384,18 +1394,17 @@ create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size, } /* - * Main function, does initialisation and calls the per-lcore functions. The CUSE - * device is also registered here to handle the IOCTLs. + * Main function, does initialisation and calls the per-lcore functions. */ int main(int argc, char *argv[]) { unsigned lcore_id, core_id = 0; unsigned nb_ports, valid_num_ports; - int ret; - uint8_t portid; + int ret, i; + uint16_t portid; static pthread_t tid; - char thread_name[RTE_MAX_THREAD_NAME_LEN]; + uint64_t flags = 0; signal(SIGINT, sigint_handler); @@ -1411,17 +1420,18 @@ main(int argc, char *argv[]) if (ret < 0) rte_exit(EXIT_FAILURE, "Invalid argument\n"); - for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++) + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { TAILQ_INIT(&lcore_info[lcore_id].vdev_list); if (rte_lcore_is_enabled(lcore_id)) - lcore_ids[core_id ++] = lcore_id; + lcore_ids[core_id++] = lcore_id; + } if (rte_lcore_count() > RTE_MAX_LCORE) rte_exit(EXIT_FAILURE,"Not enough cores\n"); /* Get the number of physical ports. */ - nb_ports = rte_eth_dev_count(); + nb_ports = rte_eth_dev_count_avail(); /* * Update the global var NUM_PORTS and global array PORTS @@ -1452,7 +1462,7 @@ main(int argc, char *argv[]) } /* initialize all ports */ - for (portid = 0; portid < nb_ports; portid++) { + RTE_ETH_FOREACH_DEV(portid) { /* skip ports that are not enabled */ if ((enabled_port_mask & (1 << portid)) == 0) { RTE_LOG(INFO, VHOST_PORT, @@ -1466,35 +1476,78 @@ main(int argc, char *argv[]) /* Enable stats if the user option is set. */ if (enable_stats) { - ret = pthread_create(&tid, NULL, (void *)print_stats, NULL); - if (ret != 0) + ret = rte_ctrl_thread_create(&tid, "print-stats", NULL, + print_stats, NULL); + if (ret < 0) rte_exit(EXIT_FAILURE, "Cannot create print-stats thread\n"); - - /* Set thread_name for aid in debugging. */ - snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats"); - ret = rte_thread_setname(tid, thread_name); - if (ret != 0) - RTE_LOG(DEBUG, VHOST_CONFIG, - "Cannot set print-stats name\n"); } /* Launch all data cores. */ RTE_LCORE_FOREACH_SLAVE(lcore_id) rte_eal_remote_launch(switch_worker, NULL, lcore_id); - if (mergeable == 0) - rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF); + if (client_mode) + flags |= RTE_VHOST_USER_CLIENT; - /* Register vhost(cuse or user) driver to handle vhost messages. */ - ret = rte_vhost_driver_register((char *)&dev_basename); - if (ret != 0) - rte_exit(EXIT_FAILURE, "vhost driver register failure.\n"); + if (dequeue_zero_copy) + flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY; - rte_vhost_driver_callback_register(&virtio_net_device_ops); + /* Register vhost user driver to handle vhost messages. 
*/ + for (i = 0; i < nb_sockets; i++) { + char *file = socket_files + i * PATH_MAX; + ret = rte_vhost_driver_register(file, flags); + if (ret != 0) { + unregister_drivers(i); + rte_exit(EXIT_FAILURE, + "vhost driver register failure.\n"); + } + + if (builtin_net_driver) + rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES); + + if (mergeable == 0) { + rte_vhost_driver_disable_features(file, + 1ULL << VIRTIO_NET_F_MRG_RXBUF); + } + + if (enable_tx_csum == 0) { + rte_vhost_driver_disable_features(file, + 1ULL << VIRTIO_NET_F_CSUM); + } + + if (enable_tso == 0) { + rte_vhost_driver_disable_features(file, + 1ULL << VIRTIO_NET_F_HOST_TSO4); + rte_vhost_driver_disable_features(file, + 1ULL << VIRTIO_NET_F_HOST_TSO6); + rte_vhost_driver_disable_features(file, + 1ULL << VIRTIO_NET_F_GUEST_TSO4); + rte_vhost_driver_disable_features(file, + 1ULL << VIRTIO_NET_F_GUEST_TSO6); + } + + if (promiscuous) { + rte_vhost_driver_enable_features(file, + 1ULL << VIRTIO_NET_F_CTRL_RX); + } + + ret = rte_vhost_driver_callback_register(file, + &virtio_net_device_ops); + if (ret != 0) { + rte_exit(EXIT_FAILURE, + "failed to register vhost driver callbacks.\n"); + } + + if (rte_vhost_driver_start(file) < 0) { + rte_exit(EXIT_FAILURE, + "failed to start vhost driver.\n"); + } + } + + RTE_LCORE_FOREACH_SLAVE(lcore_id) + rte_eal_wait_lcore(lcore_id); - /* Start CUSE session. */ - rte_vhost_driver_session_start(); return 0; }
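
For readers tracking the API migration in this patch (virtio_net_device_ops
and struct virtio_net pointers become vhost_device_ops and plain integer
vids), the per-socket bring-up that main() now performs reduces to the
sequence below. This is a condensed sketch of the same rte_vhost calls, not a
drop-in replacement for the example; the sketch_* names are illustrative:

    #include <rte_log.h>
    #include <rte_vhost.h>

    static int
    sketch_new_device(int vid)
    {
    	/* vid is the handle for all later rte_vhost_*() calls */
    	RTE_LOG(INFO, USER1, "vhost device %d is ready\n", vid);
    	return 0;
    }

    static void
    sketch_destroy_device(int vid)
    {
    	RTE_LOG(INFO, USER1, "vhost device %d was removed\n", vid);
    }

    static const struct vhost_device_ops sketch_ops = {
    	.new_device = sketch_new_device,
    	.destroy_device = sketch_destroy_device,
    };

    static int
    sketch_bring_up(const char *path, uint64_t flags)
    {
    	if (rte_vhost_driver_register(path, flags) != 0)
    		return -1;
    	if (rte_vhost_driver_callback_register(path, &sketch_ops) != 0)
    		return -1;
    	/* non-blocking: spawns the vhost-user session thread */
    	return rte_vhost_driver_start(path);
    }
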