X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fvhost%2Fmain.c;h=47899473b67d6f797825bee7023a3f4fe2023c8f;hb=1bed3a34d55a7e69df4a3f0787393da0ac6f5b7d;hp=77214a697d64ed8445a3cf9a7e37ea117e319591;hpb=16ae8abe1cb8b545d4ca5cdee0e3db32c6b8b498;p=dpdk.git

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 77214a697d..47899473b6 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -90,9 +90,6 @@
 /* Size of buffers used for snprintfs. */
 #define MAX_PRINT_BUFF 6072
 
-/* Maximum character device basename size. */
-#define MAX_BASENAME_SZ 10
-
 /* Maximum long option length for option parsing. */
 #define MAX_LONG_OPT_SZ 64
 
@@ -109,9 +106,6 @@ static uint32_t num_devices;
 static struct rte_mempool *mbuf_pool;
 static int mergeable;
 
-/* Do vlan strip on host, enabled on default */
-static uint32_t vlan_strip = 1;
-
 /* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
 typedef enum {
 	VM2VM_DISABLED = 0,
@@ -132,13 +126,17 @@ static uint32_t enable_tx_csum;
 /* Disable TSO offload */
 static uint32_t enable_tso;
 
+static int client_mode;
+static int dequeue_zero_copy;
+
 /* Specify timeout (in useconds) between retries on RX. */
 static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
 
 /* Specify the number of retries on RX. */
 static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
 
-/* Character device basename. Can be set by user. */
-static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";
+/* Socket file paths. Can be set by user */
+static char *socket_files;
+static int nb_sockets;
 
 /* empty vmdq configuration structure. Filled in programatically */
 static struct rte_eth_conf vmdq_conf_default = {
@@ -297,6 +295,17 @@ port_init(uint8_t port)
 	rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
 	tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
+
+	/*
+	 * When dequeue zero copy is enabled, guest Tx used vring will be
+	 * updated only when corresponding mbuf is freed. Thus, the nb_tx_desc
+	 * (tx_ring_size here) must be small enough so that the driver will
+	 * hit the free threshold easily and free mbufs timely. Otherwise,
+	 * guest Tx vring would be starved.
+	 */
+	if (dequeue_zero_copy)
+		tx_ring_size = 64;
+
 	tx_rings = (uint16_t)rte_lcore_count();
 
 	retval = validate_num_devices(MAX_DEVICES);
@@ -325,13 +334,18 @@ port_init(uint8_t port)
 	if (enable_tso == 0) {
 		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4);
 		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO6);
+		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_GUEST_TSO4);
+		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_GUEST_TSO6);
 	}
 
 	rx_rings = (uint16_t)dev_info.max_rx_queues;
 	/* Configure ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
-	if (retval != 0)
+	if (retval != 0) {
+		RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
+			port, strerror(-retval));
 		return retval;
+	}
 
 	/* Setup the queues. */
 	for (q = 0; q < rx_rings; q ++) {
 		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
@@ -339,21 +353,30 @@ port_init(uint8_t port)
 						rte_eth_dev_socket_id(port),
 						rxconf,
 						mbuf_pool);
-		if (retval < 0)
+		if (retval < 0) {
+			RTE_LOG(ERR, VHOST_PORT,
+				"Failed to setup rx queue %u of port %u: %s.\n",
+				q, port, strerror(-retval));
 			return retval;
+		}
 	}
 	for (q = 0; q < tx_rings; q ++) {
 		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
 						rte_eth_dev_socket_id(port),
 						txconf);
-		if (retval < 0)
+		if (retval < 0) {
+			RTE_LOG(ERR, VHOST_PORT,
+				"Failed to setup tx queue %u of port %u: %s.\n",
+				q, port, strerror(-retval));
 			return retval;
+		}
 	}
 
 	/* Start the device. */
 	retval = rte_eth_dev_start(port);
 	if (retval < 0) {
-		RTE_LOG(ERR, VHOST_DATA, "Failed to start the device.\n");
+		RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
+			port, strerror(-retval));
 		return retval;
 	}
 
@@ -376,17 +399,18 @@ port_init(uint8_t port)
 }
 
 /*
- * Set character device basename.
+ * Set socket file path.
  */
 static int
-us_vhost_parse_basename(const char *q_arg)
+us_vhost_parse_socket_path(const char *q_arg)
 {
 	/* parse number string */
-
-	if (strnlen(q_arg, MAX_BASENAME_SZ) > MAX_BASENAME_SZ)
+	if (strnlen(q_arg, PATH_MAX) > PATH_MAX)
 		return -1;
-	else
-		snprintf((char*)&dev_basename, MAX_BASENAME_SZ, "%s", q_arg);
+
+	socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
+	snprintf(socket_files + nb_sockets * PATH_MAX, PATH_MAX, "%s", q_arg);
+	nb_sockets++;
 	return 0;
 }
 
@@ -446,7 +470,7 @@ us_vhost_usage(const char *prgname)
 	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
 	"		--vm2vm [0|1|2]\n"
 	"		--rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
-	"		--dev-basename \n"
+	"		--socket-file \n"
 	"		--nb-devices ND\n"
 	"		-p PORTMASK: Set mask for ports to be used by application\n"
 	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
@@ -454,11 +478,12 @@ us_vhost_usage(const char *prgname)
 	"		--rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\n"
 	"		--rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\n"
 	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
-	"		--vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n"
 	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
-	"		--dev-basename: The basename to be used for the character device.\n"
+	"		--socket-file: The path of the socket file.\n"
 	"		--tx-csum [0|1] disable/enable TX checksum offload.\n"
-	"		--tso [0|1] disable/enable TCP segment offload.\n",
+	"		--tso [0|1] disable/enable TCP segment offload.\n"
+	"		--client register a vhost-user socket as client mode.\n"
+	"		--dequeue-zero-copy enables dequeue zero copy\n",
 	       prgname);
 }
 
@@ -478,11 +503,12 @@ us_vhost_parse_args(int argc, char **argv)
 		{"rx-retry-delay", required_argument, NULL, 0},
 		{"rx-retry-num", required_argument, NULL, 0},
 		{"mergeable", required_argument, NULL, 0},
-		{"vlan-strip", required_argument, NULL, 0},
 		{"stats", required_argument, NULL, 0},
-		{"dev-basename", required_argument, NULL, 0},
+		{"socket-file", required_argument, NULL, 0},
 		{"tx-csum", required_argument, NULL, 0},
 		{"tso", required_argument, NULL, 0},
+		{"client", no_argument, &client_mode, 1},
+		{"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
 		{NULL, 0, 0, 0},
 	};
 
@@ -600,27 +626,12 @@ us_vhost_parse_args(int argc, char **argv)
 				}
 			}
 
-			/* Enable/disable RX VLAN strip on host. */
-			if (!strncmp(long_option[option_index].name,
-				"vlan-strip", MAX_LONG_OPT_SZ)) {
-				ret = parse_num_opt(optarg, 1);
-				if (ret == -1) {
-					RTE_LOG(INFO, VHOST_CONFIG,
-						"Invalid argument for VLAN strip [0|1]\n");
-					us_vhost_usage(prgname);
-					return -1;
-				} else {
-					vlan_strip = !!ret;
-					vmdq_conf_default.rxmode.hw_vlan_strip =
-						vlan_strip;
-				}
-			}
-
 			/* Enable/disable stats. */
 			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
 				ret = parse_num_opt(optarg, INT32_MAX);
 				if (ret == -1) {
-					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
+					RTE_LOG(INFO, VHOST_CONFIG,
+						"Invalid argument for stats [0..N]\n");
 					us_vhost_usage(prgname);
 					return -1;
 				} else {
@@ -628,10 +639,13 @@ us_vhost_parse_args(int argc, char **argv)
 				}
 			}
 
-			/* Set character device basename. */
-			if (!strncmp(long_option[option_index].name, "dev-basename", MAX_LONG_OPT_SZ)) {
-				if (us_vhost_parse_basename(optarg) == -1) {
-					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for character device basename (Max %d characters)\n", MAX_BASENAME_SZ);
+			/* Set socket file path. */
+			if (!strncmp(long_option[option_index].name,
+						"socket-file", MAX_LONG_OPT_SZ)) {
+				if (us_vhost_parse_socket_path(optarg) == -1) {
+					RTE_LOG(INFO, VHOST_CONFIG,
+					"Invalid argument for socket name (Max %d characters)\n",
+					PATH_MAX);
 					us_vhost_usage(prgname);
 					return -1;
 				}
@@ -743,10 +757,7 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
 			"(%d) failed to add device MAC address to VMDQ\n",
 			vdev->vid);
 
-	/* Enable stripping of the vlan tag as we handle routing. */
-	if (vlan_strip)
-		rte_eth_dev_set_vlan_strip_on_queue(ports[0],
-			(uint16_t)vdev->vmdq_rx_q, 1);
+	rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);
 
 	/* Set device as ready for RX. */
 	vdev->ready = DEVICE_RX;
@@ -795,7 +806,7 @@ virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
 {
 	uint16_t ret;
 
-	ret = rte_vhost_enqueue_burst(dst_vdev->dev, VIRTIO_RXQ, &m, 1);
+	ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
 	if (enable_stats) {
 		rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
 		rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
@@ -821,17 +832,17 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
 		return -1;
 
 	if (vdev->vid == dst_vdev->vid) {
-		RTE_LOG(DEBUG, VHOST_DATA,
+		RTE_LOG_DP(DEBUG, VHOST_DATA,
 			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
 			vdev->vid);
 		return 0;
 	}
 
-	RTE_LOG(DEBUG, VHOST_DATA,
+	RTE_LOG_DP(DEBUG, VHOST_DATA,
 		"(%d) TX: MAC address is local\n", dst_vdev->vid);
 
 	if (unlikely(dst_vdev->remove)) {
-		RTE_LOG(DEBUG, VHOST_DATA,
+		RTE_LOG_DP(DEBUG, VHOST_DATA,
 			"(%d) device is marked for removal\n", dst_vdev->vid);
 		return 0;
 	}
@@ -856,7 +867,7 @@ find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
 		return 0;
 
 	if (vdev->vid == dst_vdev->vid) {
-		RTE_LOG(DEBUG, VHOST_DATA,
+		RTE_LOG_DP(DEBUG, VHOST_DATA,
 			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
 			vdev->vid);
 		return -1;
@@ -870,7 +881,7 @@ find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
 	*offset = VLAN_HLEN;
 	*vlan_tag = vlan_tags[vdev->vid];
 
-	RTE_LOG(DEBUG, VHOST_DATA,
+	RTE_LOG_DP(DEBUG, VHOST_DATA,
 		"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
 		vdev->vid, dst_vdev->vid, *vlan_tag);
 
@@ -962,7 +973,7 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
 		}
 	}
 
-	RTE_LOG(DEBUG, VHOST_DATA,
+	RTE_LOG_DP(DEBUG, VHOST_DATA,
 		"(%d) TX: MAC address is external\n", vdev->vid);
 
 queue2nic:
@@ -1030,7 +1041,7 @@ drain_mbuf_table(struct mbuf_table *tx_q)
 	if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
 		prev_tsc = cur_tsc;
 
-		RTE_LOG(DEBUG, VHOST_DATA,
+		RTE_LOG_DP(DEBUG, VHOST_DATA,
 			"TX queue drained after timeout with burst size %u\n",
 			tx_q->len);
 		do_drain_mbuf_table(tx_q);
@@ -1041,7 +1052,6 @@ static inline void __attribute__((always_inline))
 drain_eth_rx(struct vhost_dev *vdev)
 {
 	uint16_t rx_count, enqueue_count;
-	struct virtio_net *dev = vdev->dev;
 	struct rte_mbuf *pkts[MAX_PKT_BURST];
 
 	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
@@ -1055,19 +1065,19 @@ drain_eth_rx(struct vhost_dev *vdev)
 	 * to diminish packet loss.
 	 */
 	if (enable_retry &&
-	    unlikely(rx_count > rte_vhost_avail_entries(dev->vid,
+	    unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
 			VIRTIO_RXQ))) {
 		uint32_t retry;
 
 		for (retry = 0; retry < burst_rx_retry_num; retry++) {
 			rte_delay_us(burst_rx_delay_time);
-			if (rx_count <= rte_vhost_avail_entries(dev->vid,
+			if (rx_count <= rte_vhost_avail_entries(vdev->vid,
 					VIRTIO_RXQ))
 				break;
 		}
 	}
 
-	enqueue_count = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ,
+	enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
 						pkts, rx_count);
 	if (enable_stats) {
 		rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
@@ -1084,7 +1094,7 @@ drain_virtio_tx(struct vhost_dev *vdev)
 	uint16_t count;
 	uint16_t i;
 
-	count = rte_vhost_dequeue_burst(vdev->dev, VIRTIO_TXQ, mbuf_pool,
+	count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, mbuf_pool,
 					pkts, MAX_PKT_BURST);
 
 	/* setup VMDq for the first packet */
@@ -1171,13 +1181,13 @@ switch_worker(void *arg __rte_unused)
  * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
  */
 static void
-destroy_device (volatile struct virtio_net *dev)
+destroy_device(int vid)
 {
 	struct vhost_dev *vdev = NULL;
 	int lcore;
 
 	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
-		if (vdev->vid == dev->vid)
+		if (vdev->vid == vid)
 			break;
 	}
 	if (!vdev)
@@ -1221,12 +1231,11 @@ destroy_device (volatile struct virtio_net *dev)
  * and the allocated to a specific data core.
  */
 static int
-new_device (struct virtio_net *dev)
+new_device(int vid)
 {
 	int lcore, core_add = 0;
 	uint32_t device_num_min = num_devices;
 	struct vhost_dev *vdev;
-	int vid = dev->vid;
 
 	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
 	if (vdev == NULL) {
@@ -1235,7 +1244,6 @@ new_device (struct virtio_net *dev)
 			vid);
 		return -1;
 	}
-	vdev->dev = dev;
 	vdev->vid = vid;
 
 	TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
@@ -1259,8 +1267,8 @@ new_device (struct virtio_net *dev)
 	lcore_info[vdev->coreid].device_num++;
 
 	/* Disable notifications. 
*/ - rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0); - rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0); + rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0); + rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0); RTE_LOG(INFO, VHOST_DATA, "(%d) device has been added to data core %d\n", @@ -1316,7 +1324,7 @@ print_stats(void) "RX total: %" PRIu64 "\n" "RX dropped: %" PRIu64 "\n" "RX successful: %" PRIu64 "\n", - vdev->dev->vid, + vdev->vid, tx_total, tx_dropped, tx, rx_total, rx_dropped, rx); } @@ -1325,14 +1333,27 @@ print_stats(void) } } +static void +unregister_drivers(int socket_num) +{ + int i, ret; + + for (i = 0; i < socket_num; i++) { + ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX); + if (ret != 0) + RTE_LOG(ERR, VHOST_CONFIG, + "Fail to unregister vhost driver for %s.\n", + socket_files + i * PATH_MAX); + } +} + /* When we receive a INT signal, unregister vhost driver */ static void sigint_handler(__rte_unused int signum) { /* Unregister vhost driver. */ - int ret = rte_vhost_driver_unregister((char *)&dev_basename); - if (ret != 0) - rte_exit(EXIT_FAILURE, "vhost driver unregister failure.\n"); + unregister_drivers(nb_sockets); + exit(0); } @@ -1372,7 +1393,7 @@ create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size, mtu = 64 * 1024; nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST / - (mbuf_size - RTE_PKTMBUF_HEADROOM) * MAX_PKT_BURST; + (mbuf_size - RTE_PKTMBUF_HEADROOM); nr_mbufs_per_core += nr_rx_desc; nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache); @@ -1388,18 +1409,18 @@ create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size, } /* - * Main function, does initialisation and calls the per-lcore functions. The CUSE - * device is also registered here to handle the IOCTLs. + * Main function, does initialisation and calls the per-lcore functions. */ int main(int argc, char *argv[]) { unsigned lcore_id, core_id = 0; unsigned nb_ports, valid_num_ports; - int ret; + int ret, i; uint8_t portid; static pthread_t tid; char thread_name[RTE_MAX_THREAD_NAME_LEN]; + uint64_t flags = 0; signal(SIGINT, sigint_handler); @@ -1415,11 +1436,12 @@ main(int argc, char *argv[]) if (ret < 0) rte_exit(EXIT_FAILURE, "Invalid argument\n"); - for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++) + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { TAILQ_INIT(&lcore_info[lcore_id].vdev_list); if (rte_lcore_is_enabled(lcore_id)) - lcore_ids[core_id ++] = lcore_id; + lcore_ids[core_id++] = lcore_id; + } if (rte_lcore_count() > RTE_MAX_LCORE) rte_exit(EXIT_FAILURE,"Not enough cores\n"); @@ -1490,14 +1512,25 @@ main(int argc, char *argv[]) if (mergeable == 0) rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF); - /* Register vhost(cuse or user) driver to handle vhost messages. */ - ret = rte_vhost_driver_register((char *)&dev_basename); - if (ret != 0) - rte_exit(EXIT_FAILURE, "vhost driver register failure.\n"); + if (client_mode) + flags |= RTE_VHOST_USER_CLIENT; + + if (dequeue_zero_copy) + flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY; + + /* Register vhost user driver to handle vhost messages. */ + for (i = 0; i < nb_sockets; i++) { + ret = rte_vhost_driver_register + (socket_files + i * PATH_MAX, flags); + if (ret != 0) { + unregister_drivers(i); + rte_exit(EXIT_FAILURE, + "vhost driver register failure.\n"); + } + } rte_vhost_driver_callback_register(&virtio_net_device_ops); - /* Start CUSE session. */ rte_vhost_driver_session_start(); return 0;
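
Note on the socket path handling above: us_vhost_parse_socket_path() keeps every --socket-file argument in one flat buffer of PATH_MAX-sized slots, which the register and unregister loops later index as socket_files + i * PATH_MAX. Below is a minimal sketch of that layout, not the patch's code: add_socket_path(), socket_paths and nb_paths are illustrative names, the realloc() result is checked (the patch leaves it unchecked), and the length test uses >= PATH_MAX, since strnlen(q_arg, PATH_MAX) can never return more than PATH_MAX and the patch's "> PATH_MAX" comparison therefore never rejects anything.

	#include <limits.h>	/* PATH_MAX */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static char *socket_paths;	/* nb_paths slots of PATH_MAX bytes each */
	static int nb_paths;

	static int
	add_socket_path(const char *path)
	{
		char *tmp;

		/* Reject paths that would not fit the slot (including the NUL byte). */
		if (strnlen(path, PATH_MAX) >= PATH_MAX)
			return -1;

		tmp = realloc(socket_paths, (size_t)PATH_MAX * (nb_paths + 1));
		if (tmp == NULL)
			return -1;
		socket_paths = tmp;

		snprintf(socket_paths + (size_t)nb_paths * PATH_MAX, PATH_MAX,
			 "%s", path);
		nb_paths++;
		return 0;
	}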
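
Note on the create_mbuf_pool() hunk: the per-core mbuf estimate loses a stray "* MAX_PKT_BURST" factor. With values assumed purely for illustration (mbuf_size = 2176 bytes, RTE_PKTMBUF_HEADROOM = 128, MAX_PKT_BURST = 32, and the mtu = 64 * 1024 case set a few lines earlier in that function), the change works out to:

	old: (65536 + 2176) * 32 / (2176 - 128) * 32 = 1058 * 32 = 33856 mbufs per core
	new: (65536 + 2176) * 32 / (2176 - 128)                  =  1058 mbufs per core

so the previous expression applied MAX_PKT_BURST twice and over-sized the per-core estimate by that factor; nr_rx_desc is then added on top and the result is still kept at least as large as the mbuf cache, as before.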
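
Note on the registration flow: the new_device()/destroy_device() signature change and the registration loop in main() are the core of the move from the struct virtio_net based callbacks to the vhost-user, vid-based API. Below is a minimal, self-contained sketch of that flow, assuming the 16.07/16.11-era API declared in rte_virtio_net.h; my_new_device(), my_destroy_device(), register_one_socket() and the single-socket flow are illustrative only, not code from the patch.

	#include <stdint.h>
	#include <rte_virtio_net.h>

	static int
	my_new_device(int vid)
	{
		/* A guest connected; its queues are addressed by vid from now on. */
		rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
		rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
		return 0;
	}

	static void
	my_destroy_device(int vid)
	{
		/* The guest went away; stop submitting bursts for this vid. */
		(void)vid;
	}

	static const struct virtio_net_device_ops my_ops = {
		.new_device     = my_new_device,
		.destroy_device = my_destroy_device,
	};

	static int
	register_one_socket(const char *path, int client, int zero_copy)
	{
		uint64_t flags = 0;

		if (client)
			flags |= RTE_VHOST_USER_CLIENT;
		if (zero_copy)
			flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;

		/* One register call per socket path, as in the loop in main(). */
		if (rte_vhost_driver_register(path, flags) != 0)
			return -1;

		rte_vhost_driver_callback_register(&my_ops);

		/* Blocks and dispatches vhost-user messages to the callbacks. */
		rte_vhost_driver_session_start();
		return 0;
	}

A hypothetical invocation of the example with the new options (binary name, core/port masks and socket paths are assumptions, not taken from the patch) could look like:

	./vhost-switch -c 0xf -n 4 -- -p 0x1 \
		--socket-file /tmp/vhost-user0 --socket-file /tmp/vhost-user1 \
		--client --dequeue-zero-copy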