/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <arpa/inet.h>
#include <errno.h>
#include <getopt.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/eventfd.h>
#include <sys/param.h>
#include <unistd.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_virtio_net.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#include "main.h"
#define MAX_QUEUES 128

/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

#define MBUF_CACHE_SIZE	128
#define MBUF_DATA_SIZE	RTE_MBUF_DEFAULT_BUF_SIZE

#define MAX_PKT_BURST 32	/* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */

#define BURST_RX_WAIT_US 15	/* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4	/* Number of retries on RX. */

#define JUMBO_FRAME_MAX_SIZE 0x2600	/* 9728 bytes */

/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
#define DEVICE_RX           1
#define DEVICE_SAFE_REMOVE  2

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* Max number of devices. Limited by vmdq. */
#define MAX_DEVICES 64

/* Size of buffers used for snprintfs. */
#define MAX_PRINT_BUFF 6072

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Promiscuous mode */
static uint32_t promiscuous;

/* number of devices/queues to support */
static uint32_t num_queues = 0;
static uint32_t num_devices;

static struct rte_mempool *mbuf_pool;
static int mergeable;

/* Do vlan strip on host, enabled by default */
static uint32_t vlan_strip = 1;

/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
typedef enum {
	VM2VM_DISABLED = 0,
	VM2VM_SOFTWARE = 1,
	VM2VM_HARDWARE = 2,
	VM2VM_LAST
} vm2vm_type;
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;

/* Enable stats. */
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;

/* Disable TX checksum offload */
static uint32_t enable_tx_csum;

/* Disable TSO offload */
static uint32_t enable_tso;

static int client_mode;

/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;

/* Socket file path. Can be set by user */
static char socket_file[PATH_MAX] = "vhost-net";
/* empty vmdq configuration structure. Filled in programmatically */
static struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		/*
		 * VLAN strip is necessary for 1G NICs such as I350;
		 * it fixes a bug where ipv4 forwarding in the guest could
		 * not forward packets from one virtio dev to another.
		 */
		.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripped by hardware */
	},

	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
	.rx_adv_conf = {
		/*
		 * should be overridden separately in code with
		 * appropriate values
		 */

		/* VMDQ only configuration */
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
};
static unsigned lcore_ids[RTE_MAX_LCORE];
static uint8_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified in command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;
const uint16_t vlan_tags[] = {
	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
	1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};
/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

static struct vhost_dev_tailq_list vhost_dev_list =
	TAILQ_HEAD_INITIALIZER(vhost_dev_list);

static struct lcore_info lcore_info[RTE_MAX_LCORE];
/* Used for queueing bursts of TX packets. */
struct mbuf_table {
	unsigned len;
	unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};
/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
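/*
 * The TX drain period expressed in TSC cycles: rte_get_tsc_hz() is
 * cycles per second, so (hz + US_PER_S - 1) / US_PER_S is cycles per
 * microsecond (rounded up), scaled by BURST_TX_DRAIN_US. For example,
 * with a 2 GHz TSC this is 2000 cycles/us * 100 us = 200000 cycles.
 */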
#define MBUF_TABLE_DRAIN_TSC	((rte_get_tsc_hz() + US_PER_S - 1) \
				 / US_PER_S * BURST_TX_DRAIN_US)
/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
	struct rte_eth_vmdq_rx_conf conf;
	struct rte_eth_vmdq_rx_conf *def_conf =
		&vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
	unsigned i;

	memset(&conf, 0, sizeof(conf));
	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
	conf.nb_pool_maps = num_devices;
	conf.enable_loop_back = def_conf->enable_loop_back;
	conf.rx_mode = def_conf->rx_mode;

	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << i);
	}

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));

	return 0;
}
/*
 * Validate the device number according to the max pool number obtained from
 * dev_info. If the device number is invalid, give the error message and
 * return -1. Each device must have its own pool.
 */
static inline int
validate_num_devices(uint32_t max_nb_devices)
{
	if (num_devices > max_nb_devices) {
		RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
		return -1;
	}
	return 0;
}
/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
static inline int
port_init(uint8_t port)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_txconf *txconf;
	int16_t rx_rings, tx_rings;
	uint16_t rx_ring_size, tx_ring_size;
	int retval;
	uint16_t q;

	/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
	rte_eth_dev_info_get(port, &dev_info);

	if (dev_info.max_rx_queues > MAX_QUEUES) {
		rte_exit(EXIT_FAILURE,
			"please define MAX_QUEUES no less than %u in %s\n",
			dev_info.max_rx_queues, __FILE__);
	}

	rxconf = &dev_info.default_rxconf;
	txconf = &dev_info.default_txconf;
	rxconf->rx_drop_en = 1;

	/* Enable vlan offload */
	txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;

	/* Configure the number of supported virtio devices based on VMDQ limits */
	num_devices = dev_info.max_vmdq_pools;

	rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
	tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
	tx_rings = (uint16_t)rte_lcore_count();

	retval = validate_num_devices(MAX_DEVICES);
	if (retval < 0)
		return retval;

	/* Get port configuration. */
	retval = get_eth_conf(&port_conf, num_devices);
	if (retval < 0)
		return retval;
	/* NIC queues are divided into pf queues and vmdq queues. */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_devices * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base = dev_info.vmdq_pool_base;
	printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
		num_pf_queues, num_devices, queues_per_pool);

	if (port >= rte_eth_dev_count())
		return -1;

	if (enable_tx_csum == 0)
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_CSUM);

	if (enable_tso == 0) {
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4);
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO6);
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_GUEST_TSO4);
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_GUEST_TSO6);
	}

	rx_rings = (uint16_t)dev_info.max_rx_queues;
	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
			port, strerror(-retval));
		return retval;
	}

	/* Setup the queues. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
						rte_eth_dev_socket_id(port),
						rxconf,
						mbuf_pool);
		if (retval < 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup rx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));
			return retval;
		}
	}
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
						rte_eth_dev_socket_id(port),
						txconf);
		if (retval < 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup tx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));
			return retval;
		}
	}

	/* Start the device. */
	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
			port, strerror(-retval));
		return retval;
	}

	if (promiscuous)
		rte_eth_promiscuous_enable(port);

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
		" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
		(unsigned)port,
		vmdq_ports_eth_addr[port].addr_bytes[0],
		vmdq_ports_eth_addr[port].addr_bytes[1],
		vmdq_ports_eth_addr[port].addr_bytes[2],
		vmdq_ports_eth_addr[port].addr_bytes[3],
		vmdq_ports_eth_addr[port].addr_bytes[4],
		vmdq_ports_eth_addr[port].addr_bytes[5]);

	return 0;
}
/*
 * Set socket file path.
 */
static int
us_vhost_parse_socket_path(const char *q_arg)
{
	/* Reject paths with no terminating NUL within PATH_MAX bytes. */
	if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
		return -1;

	snprintf(socket_file, PATH_MAX, "%s", q_arg);

	return 0;
}
/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	errno = 0;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}
/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
	char *end = NULL;
	unsigned long num;

	errno = 0;

	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	if (num > max_valid_value)
		return -1;

	return num;
}
/*
 * Display usage
 */
static void
us_vhost_usage(const char *prgname)
{
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
	"		--vm2vm [0|1|2]\n"
	"		--rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
	"		--socket-file <path>\n"
	"		-p PORTMASK: Set mask for ports to be used by application\n"
	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
	"		--rx-retry [0|1]: disable/enable(default) retries on RX. Enable retry if destination queue is full\n"
	"		--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. Only takes effect if RX retries are enabled\n"
	"		--rx-retry-num [0-N]: the number of retries on RX. Only takes effect if RX retries are enabled\n"
	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
	"		--vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n"
	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
	"		--socket-file: The path of the socket file.\n"
	"		--tx-csum [0|1]: disable/enable TX checksum offload.\n"
	"		--tso [0|1]: disable/enable TCP segmentation offload.\n"
	"		--client: register a vhost-user socket as client mode.\n",
	       prgname);
}
/*
 * Parse the arguments given in the command line of the application.
 */
static int
us_vhost_parse_args(int argc, char **argv)
{
	int opt, ret;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"vm2vm", required_argument, NULL, 0},
		{"rx-retry", required_argument, NULL, 0},
		{"rx-retry-delay", required_argument, NULL, 0},
		{"rx-retry-num", required_argument, NULL, 0},
		{"mergeable", required_argument, NULL, 0},
		{"vlan-strip", required_argument, NULL, 0},
		{"stats", required_argument, NULL, 0},
		{"socket-file", required_argument, NULL, 0},
		{"tx-csum", required_argument, NULL, 0},
		{"tso", required_argument, NULL, 0},
		{"client", no_argument, &client_mode, 1},
		{NULL, 0, 0, 0},
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:P",
			long_option, &option_index)) != EOF) {
		switch (opt) {
		/* Portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
				us_vhost_usage(prgname);
				return -1;
			}
			break;

		case 'P':
			promiscuous = 1;
			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
				ETH_VMDQ_ACCEPT_BROADCAST |
				ETH_VMDQ_ACCEPT_MULTICAST;
			rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX);
			break;

		case 0:
			/* Enable/disable vm2vm comms. */
			if (!strncmp(long_option[option_index].name, "vm2vm",
				MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for "
						"vm2vm [0|1|2]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				vm2vm_mode = (vm2vm_type)ret;
			}

			/* Enable/disable retries on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				enable_retry = ret;
			}

			/* Enable/disable TX checksum offload. */
			if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				enable_tx_csum = ret;
			}

			/* Enable/disable TSO offload. */
			if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				enable_tso = ret;
			}

			/* Specify the retries delay time (in microseconds) on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				burst_rx_delay_time = ret;
			}

			/* Specify the retries number on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				burst_rx_retry_num = ret;
			}

			/* Enable/disable RX mergeable buffers. */
			if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				mergeable = !!ret;
				if (ret) {
					vmdq_conf_default.rxmode.jumbo_frame = 1;
					vmdq_conf_default.rxmode.max_rx_pkt_len
						= JUMBO_FRAME_MAX_SIZE;
				}
			}

			/* Enable/disable RX VLAN strip on host. */
			if (!strncmp(long_option[option_index].name,
					"vlan-strip", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for VLAN strip [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				vlan_strip = !!ret;
				vmdq_conf_default.rxmode.hw_vlan_strip =
					vlan_strip;
			}

			/* Enable/disable stats. */
			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for stats [0..N]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				enable_stats = ret;
			}

			/* Set socket file path. */
			if (!strncmp(long_option[option_index].name,
					"socket-file", MAX_LONG_OPT_SZ)) {
				if (us_vhost_parse_socket_path(optarg) == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for socket name (Max %d characters)\n",
						PATH_MAX);
					us_vhost_usage(prgname);
					return -1;
				}
			}

			break;

		/* Invalid option - print options. */
		default:
			us_vhost_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint8_t)i;
	}

	if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	return 0;
}
/*
 * Update the global vars num_ports and ports[] according to the number of
 * system ports, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (ports[portid] >= nb_ports) {
			RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
				ports[portid], (nb_ports - 1));
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}
static inline struct vhost_dev *__attribute__((always_inline))
find_vhost_dev(struct ether_addr *mac)
{
	struct vhost_dev *vdev;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->ready == DEVICE_RX &&
		    is_same_ether_addr(mac, &vdev->mac_address))
			return vdev;
	}

	return NULL;
}
/*
 * This function learns the MAC address of the device and registers this along with a
 * vlan tag to a VMDQ.
 */
static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct ether_hdr *pkt_hdr;
	int i, ret;

	/* Learn MAC address of guest device from packet */
	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	if (find_vhost_dev(&pkt_hdr->s_addr)) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) device is using a registered MAC!\n",
			vdev->vid);
		return -1;
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];

	/* vlan_tag currently uses the device_id. */
	vdev->vlan_tag = vlan_tags[vdev->vid];

	/* Print out VMDQ registration info. */
	RTE_LOG(INFO, VHOST_DATA,
		"(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
		vdev->vid,
		vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
		vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
		vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
		vdev->vlan_tag);

	/* Register the MAC address. */
	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
				(uint32_t)vdev->vid + vmdq_pool_base);
	if (ret)
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) failed to add device MAC address to VMDQ\n",
			vdev->vid);

	/* Enable stripping of the vlan tag as we handle routing. */
	if (vlan_strip)
		rte_eth_dev_set_vlan_strip_on_queue(ports[0],
			(uint16_t)vdev->vmdq_rx_q, 1);

	/* Set device as ready for RX. */
	vdev->ready = DEVICE_RX;

	return 0;
}
/*
 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
 * queue before disabling RX on the device.
 */
static inline void
unlink_vmdq(struct vhost_dev *vdev)
{
	unsigned i = 0;
	unsigned rx_count;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	if (vdev->ready == DEVICE_RX) {
		/* Clear MAC and VLAN settings. */
		rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
		for (i = 0; i < 6; i++)
			vdev->mac_address.addr_bytes[i] = 0;

		vdev->vlan_tag = 0;

		/* Clear out the receive buffers. */
		rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

		while (rx_count) {
			for (i = 0; i < rx_count; i++)
				rte_pktmbuf_free(pkts_burst[i]);

			rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
		}

		vdev->ready = DEVICE_MAC_LEARNING;
	}
}
static inline void __attribute__((always_inline))
virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
	    struct rte_mbuf *m)
{
	uint16_t ret;

	ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
	if (enable_stats) {
		rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
		rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
		src_vdev->stats.tx_total++;
		src_vdev->stats.tx += ret;
	}
}
/*
 * Check if the packet destination MAC address is for a local device. If so then put
 * the packet on that device's RX queue. If not then return.
 */
static inline int __attribute__((always_inline))
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct ether_hdr *pkt_hdr;
	struct vhost_dev *dst_vdev;

	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
	if (!dst_vdev)
		return -1;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC are the same. Dropping packet.\n",
			vdev->vid);
		return 0;
	}

	RTE_LOG(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is local\n", dst_vdev->vid);

	if (unlikely(dst_vdev->remove)) {
		RTE_LOG(DEBUG, VHOST_DATA,
			"(%d) device is marked for removal\n", dst_vdev->vid);
		return 0;
	}

	virtio_xmit(dst_vdev, vdev, m);
	return 0;
}
/*
 * Check if the destination MAC of a packet is one local VM,
 * and get its vlan tag, and offset if it is.
 */
static inline int __attribute__((always_inline))
find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
	uint32_t *offset, uint16_t *vlan_tag)
{
	struct vhost_dev *dst_vdev;
	struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
	if (!dst_vdev)
		return 0;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC are the same. Dropping packet.\n",
			vdev->vid);
		return -1;
	}

	/*
	 * HW vlan strip will reduce the packet length by the length of
	 * the vlan tag, so we need to restore the packet length by
	 * adding it back.
	 */
	*offset = VLAN_HLEN;
	*vlan_tag = vlan_tags[vdev->vid];

	RTE_LOG(DEBUG, VHOST_DATA,
		"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
		vdev->vid, dst_vdev->vid, *vlan_tag);

	return 0;
}
static uint16_t
get_psd_sum(void *l3_hdr, uint64_t ol_flags)
{
	if (ol_flags & PKT_TX_IPV4)
		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}
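/*
 * Note on the TSO path below: NICs that perform TCP segmentation expect
 * the TCP checksum field to be pre-seeded with the pseudo-header checksum
 * (computed over addresses, protocol and length, but no payload); the
 * hardware then finalises the checksum of each emitted segment. The IPv4
 * header checksum is likewise zeroed here and recomputed by the NIC when
 * PKT_TX_IP_CKSUM is set.
 */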
static void virtio_tx_offload(struct rte_mbuf *m)
{
	void *l3_hdr;
	struct ipv4_hdr *ipv4_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	l3_hdr = (char *)eth_hdr + m->l2_len;

	if (m->ol_flags & PKT_TX_IPV4) {
		ipv4_hdr = l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		m->ol_flags |= PKT_TX_IP_CKSUM;
	}

	tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
	tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}
static inline void
free_pkts(struct rte_mbuf **pkts, uint16_t n)
{
	while (n--)
		rte_pktmbuf_free(pkts[n]);
}

static inline void __attribute__((always_inline))
do_drain_mbuf_table(struct mbuf_table *tx_q)
{
	uint16_t count;

	count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
				 tx_q->m_table, tx_q->len);
	if (unlikely(count < tx_q->len))
		free_pkts(&tx_q->m_table[count], tx_q->len - count);

	tx_q->len = 0;
}
/*
 * This function routes the TX packet to the correct interface. This
 * may be a local device or the physical port.
 */
static inline void __attribute__((always_inline))
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
	struct mbuf_table *tx_q;
	uint32_t offset = 0;
	const uint16_t lcore_id = rte_lcore_id();
	struct ether_hdr *nh;

	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
		struct vhost_dev *vdev2;

		TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
			virtio_xmit(vdev2, vdev, m);
		}
		goto queue2nic;
	}

	/* Check if destination is a local VM. */
	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
		rte_pktmbuf_free(m);
		return;
	}

	if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
		if (unlikely(find_local_dest(vdev, m, &offset,
					     &vlan_tag) != 0)) {
			rte_pktmbuf_free(m);
			return;
		}
	}

	RTE_LOG(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is external\n", vdev->vid);

queue2nic:

	/* Add packet to the port tx queue. */
	tx_q = &lcore_tx_queue[lcore_id];

	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
		/* Guest has inserted the vlan tag. */
		struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
		uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
		if ((vm2vm_mode == VM2VM_HARDWARE) &&
			(vh->vlan_tci != vlan_tag_be))
			vh->vlan_tci = vlan_tag_be;
	} else {
		m->ol_flags |= PKT_TX_VLAN_PKT;

		/*
		 * Find the right seg to adjust the data len when offset is
		 * bigger than tail room size.
		 */
		if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
			if (likely(offset <= rte_pktmbuf_tailroom(m)))
				m->data_len += offset;
			else {
				struct rte_mbuf *seg = m;

				while ((seg->next != NULL) &&
					(offset > rte_pktmbuf_tailroom(seg)))
					seg = seg->next;

				seg->data_len += offset;
			}
			m->pkt_len += offset;
		}

		m->vlan_tci = vlan_tag;
	}

	if (m->ol_flags & PKT_TX_TCP_SEG)
		virtio_tx_offload(m);

	tx_q->m_table[tx_q->len++] = m;
	if (enable_stats) {
		vdev->stats.tx_total++;
		vdev->stats.tx++;
	}

	if (unlikely(tx_q->len == MAX_PKT_BURST))
		do_drain_mbuf_table(tx_q);
}
static inline void __attribute__((always_inline))
drain_mbuf_table(struct mbuf_table *tx_q)
{
	static uint64_t prev_tsc;
	uint64_t cur_tsc;

	if (tx_q->len == 0)
		return;

	cur_tsc = rte_rdtsc();
	if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
		prev_tsc = cur_tsc;

		RTE_LOG(DEBUG, VHOST_DATA,
			"TX queue drained after timeout with burst size %u\n",
			tx_q->len);
		do_drain_mbuf_table(tx_q);
	}
}
static inline void __attribute__((always_inline))
drain_eth_rx(struct vhost_dev *vdev)
{
	uint16_t rx_count, enqueue_count;
	struct rte_mbuf *pkts[MAX_PKT_BURST];

	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
				    pkts, MAX_PKT_BURST);
	if (!rx_count)
		return;

	/*
	 * When "enable_retry" is set, here we wait and retry when there
	 * are not enough free slots in the queue to hold @rx_count
	 * packets, to diminish packet loss.
	 */
	if (enable_retry &&
	    unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
			VIRTIO_RXQ))) {
		uint32_t retry;

		for (retry = 0; retry < burst_rx_retry_num; retry++) {
			rte_delay_us(burst_rx_delay_time);
			if (rx_count <= rte_vhost_avail_entries(vdev->vid,
					VIRTIO_RXQ))
				break;
		}
	}

	enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
						pkts, rx_count);
	if (enable_stats) {
		rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
		rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
	}

	free_pkts(pkts, rx_count);
}
static inline void __attribute__((always_inline))
drain_virtio_tx(struct vhost_dev *vdev)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t count;
	uint16_t i;

	count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, mbuf_pool,
					pkts, MAX_PKT_BURST);

	/* setup VMDq for the first packet */
	if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
		if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1) {
			/* Free the burst and bail out; routing freed mbufs
			 * below would be a use-after-free. */
			free_pkts(pkts, count);
			return;
		}
	}

	for (i = 0; i < count; ++i)
		virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}
/*
 * Main function of vhost-switch. It basically does:
 *
 * for each vhost device {
 *    - drain_eth_rx()
 *
 *      Which drains the host eth Rx queue linked to the vhost device,
 *      and delivers all of the packets to the guest virtio Rx ring
 *      associated with this vhost device.
 *
 *    - drain_virtio_tx()
 *
 *      Which drains the guest virtio Tx queue and delivers all of the
 *      packets to the target, which could be another vhost device, or the
 *      physical eth dev. The route is done in function "virtio_tx_route".
 * }
 */
static int
switch_worker(void *arg __rte_unused)
{
	unsigned i;
	unsigned lcore_id = rte_lcore_id();
	struct vhost_dev *vdev;
	struct mbuf_table *tx_q;

	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);

	tx_q = &lcore_tx_queue[lcore_id];
	for (i = 0; i < rte_lcore_count(); i++) {
		if (lcore_ids[i] == lcore_id) {
			tx_q->txq_id = i;
			break;
		}
	}

	while (1) {
		drain_mbuf_table(tx_q);

		/*
		 * Inform the configuration core that we have exited the
		 * linked list and that no devices are in use if requested.
		 */
		if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
			lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;

		/*
		 * Process vhost devices
		 */
		TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
			      lcore_vdev_entry) {
			if (unlikely(vdev->remove)) {
				unlink_vmdq(vdev);
				vdev->ready = DEVICE_SAFE_REMOVE;
				continue;
			}

			if (likely(vdev->ready == DEVICE_RX))
				drain_eth_rx(vdev);

			if (likely(!vdev->remove))
				drain_virtio_tx(vdev);
		}
	}

	return 0;
}
/*
 * Remove a device from the specific data core linked list and from the
 * main linked list. Synchronization occurs through the use of the
 * lcore dev_removal_flag. The device's remove flag is volatile to avoid
 * re-ordering of dev->remove=1, which could cause an infinite loop in
 * the rte_pause loop.
 */
static void
destroy_device(int vid)
{
	struct vhost_dev *vdev = NULL;
	int lcore;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->vid == vid)
			break;
	}
	if (!vdev)
		return;

	/* Set the remove flag. */
	vdev->remove = 1;
	while (vdev->ready != DEVICE_SAFE_REMOVE) {
		rte_pause();
	}

	TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
		     lcore_vdev_entry);
	TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);

	/* Set the dev_removal_flag on each lcore. */
	RTE_LCORE_FOREACH_SLAVE(lcore)
		lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;

	/*
	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
	 * we can be sure that they can no longer access the device removed
	 * from the linked lists and that the devices are no longer in use.
	 */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
			rte_pause();
	}

	lcore_info[vdev->coreid].device_num--;

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been removed from data core\n",
		vdev->vid);

	rte_free(vdev);
}
/*
 * A new device is added to a data core. First the device is added to the main linked list
 * and then allocated to a specific data core.
 */
static int
new_device(int vid)
{
	int lcore, core_add = 0;
	uint32_t device_num_min = num_devices;
	struct vhost_dev *vdev;

	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
	if (vdev == NULL) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%d) couldn't allocate memory for vhost dev\n",
			vid);
		return -1;
	}
	vdev->vid = vid;

	TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
	vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;

	/* Reset ready flag. */
	vdev->ready = DEVICE_MAC_LEARNING;
	vdev->remove = 0;

	/* Find a suitable lcore to add the device. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		if (lcore_info[lcore].device_num < device_num_min) {
			device_num_min = lcore_info[lcore].device_num;
			core_add = lcore;
		}
	}
	vdev->coreid = core_add;

	TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
			  lcore_vdev_entry);
	lcore_info[vdev->coreid].device_num++;

	/* Disable notifications. */
	rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
	rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been added to data core %d\n",
		vid, vdev->coreid);

	return 0;
}
/*
 * These callbacks allow devices to be added to the data core when configuration
 * has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
	.new_device =  new_device,
	.destroy_device = destroy_device,
};
/*
 * This thread wakes up periodically and prints statistics if the user
 * has enabled them.
 */
static void
print_stats(void)
{
	struct vhost_dev *vdev;
	uint64_t tx_dropped, rx_dropped;
	uint64_t tx, tx_total, rx, rx_total;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	while (1) {
		sleep(enable_stats);

		/* Clear screen and move to top left */
		printf("%s%s\n", clr, top_left);
		printf("Device statistics =================================\n");

		TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
			tx_total   = vdev->stats.tx_total;
			tx         = vdev->stats.tx;
			tx_dropped = tx_total - tx;

			rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
			rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
			rx_dropped = rx_total - rx;

			printf("Statistics for device %d\n"
				"-----------------------\n"
				"TX total: %" PRIu64 "\n"
				"TX dropped: %" PRIu64 "\n"
				"TX successful: %" PRIu64 "\n"
				"RX total: %" PRIu64 "\n"
				"RX dropped: %" PRIu64 "\n"
				"RX successful: %" PRIu64 "\n",
				vdev->vid,
				tx_total, tx_dropped, tx,
				rx_total, rx_dropped, rx);
		}

		printf("===================================================\n");
	}
}
/* When we receive an INT signal, unregister the vhost driver */
static void
sigint_handler(__rte_unused int signum)
{
	/* Unregister vhost driver. */
	int ret = rte_vhost_driver_unregister(socket_file);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "vhost driver unregister failure.\n");
	exit(0);
}
/*
 * While creating an mbuf pool, one key thing is to figure out how
 * many mbuf entries are enough for our use. FYI, here are some
 * guidelines:
 *
 * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
 *
 * - For each switch core (a CPU core that does the packet switching), we
 *   also need to reserve some mbufs for receiving the packets from the
 *   virtio Tx queue. How many is enough depends on the usage. It's
 *   normally a simple calculation like the following:
 *
 *       MAX_PKT_BURST * max packet size / mbuf size
 *
 *   So, we definitely need to allocate more mbufs when TSO is enabled.
 *
 * - Similarly, for each switching core, we should reserve @nr_rx_desc
 *   mbufs for receiving the packets from the physical NIC device.
 *
 * - We also need to make sure, for each switch core, we have allocated
 *   enough mbufs to fill up the mbuf cache.
 */
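/*
 * Worked example with the defaults in this file (illustrative only,
 * assuming MBUF_DATA_SIZE = 2176 bytes, i.e. 2048B dataroom + 128B
 * headroom, no TSO and no mergeable buffers): each switch core needs
 * about (1500 + 2176) * 32 / (2176 - 128) = ~57 mbufs for virtio Tx
 * bursts plus 1024 for the NIC Rx descriptors, and each port adds
 * 128 queues * 1024 descriptors reserved at queue setup.
 */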
static void
create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
	uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
{
	uint32_t nr_mbufs;
	uint32_t nr_mbufs_per_core;
	uint32_t mtu = 1500;

	if (mergeable)
		mtu = 9000;
	if (enable_tso)
		mtu = 64 * 1024;

	nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
			(mbuf_size - RTE_PKTMBUF_HEADROOM);
	nr_mbufs_per_core += nr_rx_desc;
	nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);

	nr_mbufs  = nr_queues * nr_rx_desc;
	nr_mbufs += nr_mbufs_per_core * nr_switch_core;
	nr_mbufs *= nr_port;

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
					    nr_mbuf_cache, 0, mbuf_size,
					    rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
}
/*
 * Main function, does initialisation and calls the per-lcore functions.
 * The vhost-user driver is also registered here to handle vhost messages.
 */
int
main(int argc, char *argv[])
{
	unsigned lcore_id, core_id = 0;
	unsigned nb_ports, valid_num_ports;
	int ret;
	uint8_t portid;
	static pthread_t tid;
	char thread_name[RTE_MAX_THREAD_NAME_LEN];
	uint64_t flags = 0;

	signal(SIGINT, sigint_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = us_vhost_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid argument\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		TAILQ_INIT(&lcore_info[lcore_id].vdev_list);

		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;
	}

	if (rte_lcore_count() > RTE_MAX_LCORE)
		rte_exit(EXIT_FAILURE, "Not enough cores\n");

	/* Get the number of physical ports. */
	nb_ports = rte_eth_dev_count();

	/*
	 * Update the global var NUM_PORTS and global array PORTS
	 * and get value of var VALID_NUM_PORTS according to system ports number
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	/*
	 * FIXME: here we are trying to allocate mbufs big enough for
	 * @MAX_QUEUES, but the truth is we're never going to use that
	 * many queues here. We probably should only do allocation for
	 * those queues we are going to use.
	 */
	create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
			 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);

	if (vm2vm_mode == VM2VM_HARDWARE) {
		/* Enable VT loop back to let L2 switch to do it. */
		vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
		RTE_LOG(DEBUG, VHOST_CONFIG,
			"Enable loop back for L2 switch in vmdq.\n");
	}

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			RTE_LOG(INFO, VHOST_PORT,
				"Skipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid) != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot initialize network ports\n");
	}

	/* Enable stats if the user option is set. */
	if (enable_stats) {
		ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot create print-stats thread\n");

		/* Set thread_name for aid in debugging. */
		snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
		ret = rte_thread_setname(tid, thread_name);
		if (ret != 0)
			RTE_LOG(DEBUG, VHOST_CONFIG,
				"Cannot set print-stats name\n");
	}

	/* Launch all data cores. */
	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_remote_launch(switch_worker, NULL, lcore_id);

	if (mergeable == 0)
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);

	if (client_mode)
		flags |= RTE_VHOST_USER_CLIENT;

	/* Register vhost user driver to handle vhost messages. */
	ret = rte_vhost_driver_register(socket_file, flags);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "vhost driver register failure.\n");

	rte_vhost_driver_callback_register(&virtio_net_device_ops);

	/* Start the vhost driver session; this call does not return. */
	rte_vhost_driver_session_start();

	return 0;
}