/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <arpa/inet.h>
#include <getopt.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <signal.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/param.h>
#include <unistd.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_virtio_net.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#include "main.h"
#define MAX_QUEUES 128

/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

#define MBUF_CACHE_SIZE 128
#define MBUF_DATA_SIZE  RTE_MBUF_DEFAULT_BUF_SIZE

#define MAX_PKT_BURST 32     /* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

#define BURST_RX_WAIT_US 15  /* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4   /* Number of retries on RX. */

#define JUMBO_FRAME_MAX_SIZE 0x2600
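/*
 * Note: 0x2600 is 9,728 bytes, a typical upper bound for jumbo frames.
 * It is applied below as max_rx_pkt_len when --mergeable turns on jumbo
 * frame support.
 */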
/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
#define DEVICE_RX           1
#define DEVICE_SAFE_REMOVE  2
/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* Max number of devices. Limited by vmdq. */
#define MAX_DEVICES 64

/* Size of buffers used for snprintfs. */
#define MAX_PRINT_BUFF 6072

/* Maximum character device basename size. */
#define MAX_BASENAME_SZ 10

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Promiscuous mode */
static uint32_t promiscuous;

/* number of devices/queues to support */
static uint32_t num_queues = 0;
static uint32_t num_devices;

static struct rte_mempool *mbuf_pool;
static int mergeable;

/* Do vlan strip on host, enabled by default */
static uint32_t vlan_strip = 1;
/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
typedef enum {
    VM2VM_DISABLED = 0,
    VM2VM_SOFTWARE = 1,
    VM2VM_HARDWARE = 2,
    VM2VM_LAST
} vm2vm_type;
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
/* Enable stats. */
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;

/* Disable TX checksum offload */
static uint32_t enable_tx_csum;

/* Disable TSO offload */
static uint32_t enable_tso;

static int client_mode;

/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;

/* Character device basename. Can be set by user. */
static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";
/* Empty VMDQ configuration structure. Filled in programmatically. */
static struct rte_eth_conf vmdq_conf_default = {
    .rxmode = {
        .mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
        .split_hdr_size = 0,
        .header_split   = 0, /**< Header Split disabled */
        .hw_ip_checksum = 0, /**< IP checksum offload disabled */
        .hw_vlan_filter = 0, /**< VLAN filtering disabled */
        /*
         * VLAN strip is necessary for 1G NICs such as the I350;
         * it fixes a bug where IPv4 forwarding in the guest could not
         * forward packets from one virtio dev to another virtio dev.
         */
        .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
        .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
        .hw_strip_crc   = 0, /**< CRC stripped by hardware */
    },

    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
    .rx_adv_conf = {
        /*
         * should be overridden separately in code with
         * appropriate values
         */

        /* VMDQ only configuration */
        .vmdq_rx_conf = {
            .nb_queue_pools = ETH_8_POOLS,
            .enable_default_pool = 0,
            .default_pool = 0,
            .nb_pool_maps = 0,
            .pool_map = {{0, 0},},
        },
    },
};
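/*
 * Note: VMDQ-only RX mode lets the NIC demultiplex incoming packets into
 * per-device pools by destination MAC address and VLAN tag, so each virtio
 * device can be served from its own hardware queue(s) without a software
 * classifier on the host.
 */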
static unsigned lcore_ids[RTE_MAX_LCORE];
static uint8_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified on the command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;
const uint16_t vlan_tags[] = {
    1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
    1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
    1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
    1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
    1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
    1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
    1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
    1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};
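/*
 * Note: the table above holds one VLAN tag per possible VMDQ pool (64 here,
 * matching MAX_DEVICES). A device's vid indexes straight into it, so device
 * N is always registered with VLAN 1000 + N.
 */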
/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

static struct vhost_dev_tailq_list vhost_dev_list =
    TAILQ_HEAD_INITIALIZER(vhost_dev_list);

static struct lcore_info lcore_info[RTE_MAX_LCORE];

/* Used for queueing bursts of TX packets. */
struct mbuf_table {
    unsigned len;
    unsigned txq_id;
    struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
#define MBUF_TABLE_DRAIN_TSC ((rte_get_tsc_hz() + US_PER_S - 1) \
                / US_PER_S * BURST_TX_DRAIN_US)
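/*
 * Note: the macro above first computes TSC cycles per microsecond (rounded
 * up), then scales by the drain interval. For example, with a 2.4 GHz TSC:
 * ceil(2,400,000,000 / 1,000,000) * 100 = 2400 * 100 = 240,000 cycles
 * between forced drains of a partially filled TX burst table.
 */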
/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
    struct rte_eth_vmdq_rx_conf conf;
    struct rte_eth_vmdq_rx_conf *def_conf =
        &vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
    unsigned i;

    memset(&conf, 0, sizeof(conf));
    conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
    conf.nb_pool_maps = num_devices;
    conf.enable_loop_back = def_conf->enable_loop_back;
    conf.rx_mode = def_conf->rx_mode;

    for (i = 0; i < conf.nb_pool_maps; i++) {
        conf.pool_map[i].vlan_id = vlan_tags[i];
        conf.pool_map[i].pools = (1UL << i);
    }

    (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
    (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
           sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
    return 0;
}
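/*
 * Note: after get_eth_conf() runs with num_devices = 8, for instance, the
 * pool map reads {vlan 1000 -> pool bit 0x01, vlan 1001 -> 0x02, ...,
 * vlan 1007 -> 0x80}: exactly one pool, and hence one device, per VLAN.
 */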
/*
 * Validate the device number according to the max pool number obtained from
 * dev_info. If the device number is invalid, give the error message and
 * return -1. Each device must have its own pool.
 */
static inline int
validate_num_devices(uint32_t max_nb_devices)
{
    if (num_devices > max_nb_devices) {
        RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
        return -1;
    }
    return 0;
}
/*
 * Initialises a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port)
{
    struct rte_eth_dev_info dev_info;
    struct rte_eth_conf port_conf;
    struct rte_eth_rxconf *rxconf;
    struct rte_eth_txconf *txconf;
    int16_t rx_rings, tx_rings;
    uint16_t rx_ring_size, tx_ring_size;
    int retval;
    uint16_t q;

    /* The max pool number from dev_info will be used to validate the pool number specified on the cmd line */
    rte_eth_dev_info_get(port, &dev_info);

    if (dev_info.max_rx_queues > MAX_QUEUES) {
        rte_exit(EXIT_FAILURE,
            "please define MAX_QUEUES no less than %u in %s\n",
            dev_info.max_rx_queues, __FILE__);
    }

    rxconf = &dev_info.default_rxconf;
    txconf = &dev_info.default_txconf;
    rxconf->rx_drop_en = 1;

    /* Enable vlan offload */
    txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;

    /* Configure the number of supported virtio devices based on VMDQ limits */
    num_devices = dev_info.max_vmdq_pools;

    rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
    tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
    tx_rings = (uint16_t)rte_lcore_count();

    retval = validate_num_devices(MAX_DEVICES);
    if (retval < 0)
        return retval;

    /* Get port configuration. */
    retval = get_eth_conf(&port_conf, num_devices);
    if (retval < 0)
        return retval;
    /* NIC queues are divided into pf queues and vmdq queues. */
    num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
    queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
    num_vmdq_queues = num_devices * queues_per_pool;
    num_queues = num_pf_queues + num_vmdq_queues;
    vmdq_queue_base = dev_info.vmdq_queue_base;
    vmdq_pool_base = dev_info.vmdq_pool_base;
    printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
        num_pf_queues, num_devices, queues_per_pool);
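    /*
     * Illustration (the numbers vary by NIC): on a device reporting 128 RX
     * queues, 64 VMDQ pools and 128 VMDQ queues, this yields
     * num_pf_queues = 0, queues_per_pool = 2 and num_vmdq_queues = 128;
     * pool N then owns queues vmdq_queue_base + 2N and vmdq_queue_base + 2N + 1.
     */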
    if (port >= rte_eth_dev_count())
        return -1;

    if (enable_tx_csum == 0)
        rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_CSUM);

    if (enable_tso == 0) {
        rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4);
        rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO6);
    }

    rx_rings = (uint16_t)dev_info.max_rx_queues;
    /* Configure ethernet device. */
    retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
    if (retval != 0) {
        RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
            port, strerror(-retval));
        return retval;
    }

    /* Setup the queues. */
    for (q = 0; q < rx_rings; q++) {
        retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
                        rte_eth_dev_socket_id(port),
                        rxconf,
                        mbuf_pool);
        if (retval < 0) {
            RTE_LOG(ERR, VHOST_PORT,
                "Failed to setup rx queue %u of port %u: %s.\n",
                q, port, strerror(-retval));
            return retval;
        }
    }
    for (q = 0; q < tx_rings; q++) {
        retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
                        rte_eth_dev_socket_id(port),
                        txconf);
        if (retval < 0) {
            RTE_LOG(ERR, VHOST_PORT,
                "Failed to setup tx queue %u of port %u: %s.\n",
                q, port, strerror(-retval));
            return retval;
        }
    }

    /* Start the device. */
    retval = rte_eth_dev_start(port);
    if (retval < 0) {
        RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
            port, strerror(-retval));
        return retval;
    }

    if (promiscuous)
        rte_eth_promiscuous_enable(port);

    rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
    RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
    RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
        " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
        port,
        vmdq_ports_eth_addr[port].addr_bytes[0],
        vmdq_ports_eth_addr[port].addr_bytes[1],
        vmdq_ports_eth_addr[port].addr_bytes[2],
        vmdq_ports_eth_addr[port].addr_bytes[3],
        vmdq_ports_eth_addr[port].addr_bytes[4],
        vmdq_ports_eth_addr[port].addr_bytes[5]);

    return 0;
}
/*
 * Set character device basename.
 */
static int
us_vhost_parse_basename(const char *q_arg)
{
    /* parse the basename string; reject names that would not fit */
    if (strnlen(q_arg, MAX_BASENAME_SZ) >= MAX_BASENAME_SZ)
        return -1;

    snprintf((char *)&dev_basename, MAX_BASENAME_SZ, "%s", q_arg);

    return 0;
}
/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
    char *end = NULL;
    unsigned long pm;

    errno = 0;

    /* parse hexadecimal string */
    pm = strtoul(portmask, &end, 16);
    if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
        return -1;

    if (pm == 0)
        return -1;

    return pm;
}
/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
    char *end = NULL;
    unsigned long num;

    errno = 0;

    /* parse unsigned int string */
    num = strtoul(q_arg, &end, 10);
    if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
        return -1;

    if (num > max_valid_value)
        return -1;

    return num;
}
/*
 * Display usage
 */
static void
us_vhost_usage(const char *prgname)
{
    RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
    "       --vm2vm [0|1|2]\n"
    "       --rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
    "       --dev-basename <name>\n"
    "       -p PORTMASK: Set mask for ports to be used by application\n"
    "       --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
    "       --rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destination queue is full\n"
    "       --rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. Takes effect only if rx retries are enabled\n"
    "       --rx-retry-num [0-N]: the number of retries on rx. Takes effect only if rx retries are enabled\n"
    "       --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
    "       --vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n"
    "       --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
    "       --dev-basename: The basename to be used for the character device.\n"
    "       --tx-csum [0|1]: disable/enable TX checksum offload.\n"
    "       --tso [0|1]: disable/enable TCP segment offload.\n"
    "       --client: register a vhost-user socket as client mode.\n",
           prgname);
}
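/*
 * Illustrative invocation (the EAL flags are a matter of deployment, not
 * fixed by this example):
 *
 *   ./vhost-switch -c 0xf -n 4 -- -p 0x1 --stats 2 --mergeable 0 \
 *       --dev-basename vhost-net
 *
 * i.e. use the first physical port, print stats every 2 seconds, and create
 * the vhost device under the default "vhost-net" basename.
 */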
/*
 * Parse the arguments given in the command line of the application.
 */
static int
us_vhost_parse_args(int argc, char **argv)
{
    int opt, ret;
    int option_index;
    unsigned i;
    const char *prgname = argv[0];
    static struct option long_option[] = {
        {"vm2vm", required_argument, NULL, 0},
        {"rx-retry", required_argument, NULL, 0},
        {"rx-retry-delay", required_argument, NULL, 0},
        {"rx-retry-num", required_argument, NULL, 0},
        {"mergeable", required_argument, NULL, 0},
        {"vlan-strip", required_argument, NULL, 0},
        {"stats", required_argument, NULL, 0},
        {"dev-basename", required_argument, NULL, 0},
        {"tx-csum", required_argument, NULL, 0},
        {"tso", required_argument, NULL, 0},
        {"client", no_argument, &client_mode, 1},
        {NULL, 0, 0, 0},
    };

    /* Parse command line */
    while ((opt = getopt_long(argc, argv, "p:P",
            long_option, &option_index)) != EOF) {
        switch (opt) {
        /* Portmask */
        case 'p':
            enabled_port_mask = parse_portmask(optarg);
            if (enabled_port_mask == 0) {
                RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
                us_vhost_usage(prgname);
                return -1;
            }
            break;

        case 'P':
            promiscuous = 1;
            vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
                ETH_VMDQ_ACCEPT_BROADCAST |
                ETH_VMDQ_ACCEPT_MULTICAST;
            rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX);
            break;

        case 0:
            /* Enable/disable vm2vm comms. */
            if (!strncmp(long_option[option_index].name, "vm2vm",
                MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for "
                        "vm2vm [0|1|2]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                vm2vm_mode = (vm2vm_type)ret;
            }

            /* Enable/disable retries on RX. */
            if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                enable_retry = ret;
            }

            /* Enable/disable TX checksum offload. */
            if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                enable_tx_csum = ret;
            }

            /* Enable/disable TSO offload. */
            if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                enable_tso = ret;
            }

            /* Specify the retries delay time (in microseconds) on RX. */
            if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, INT32_MAX);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                burst_rx_delay_time = ret;
            }

            /* Specify the retries number on RX. */
            if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, INT32_MAX);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                burst_rx_retry_num = ret;
            }

            /* Enable/disable RX mergeable buffers. */
            if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                mergeable = !!ret;
                if (ret) {
                    vmdq_conf_default.rxmode.jumbo_frame = 1;
                    vmdq_conf_default.rxmode.max_rx_pkt_len
                        = JUMBO_FRAME_MAX_SIZE;
                }
            }

            /* Enable/disable RX VLAN strip on host. */
            if (!strncmp(long_option[option_index].name,
                "vlan-strip", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for VLAN strip [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                vlan_strip = !!ret;
                vmdq_conf_default.rxmode.hw_vlan_strip =
                    vlan_strip;
            }

            /* Enable/disable stats. */
            if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, INT32_MAX);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                enable_stats = ret;
            }

            /* Set character device basename. */
            if (!strncmp(long_option[option_index].name, "dev-basename", MAX_LONG_OPT_SZ)) {
                if (us_vhost_parse_basename(optarg) == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for character device basename (Max %d characters)\n", MAX_BASENAME_SZ);
                    us_vhost_usage(prgname);
                    return -1;
                }
            }

            break;

        /* Invalid option - print options. */
        default:
            us_vhost_usage(prgname);
            return -1;
        }
    }

    for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
        if (enabled_port_mask & (1 << i))
            ports[num_ports++] = (uint8_t)i;
    }

    if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
        RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
            "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
        return -1;
    }

    return 0;
}
/*
 * Update the global vars NUM_PORTS and PORTS according to the number of
 * system ports, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
    unsigned valid_num_ports = num_ports;
    unsigned portid;

    if (num_ports > nb_ports) {
        RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
            num_ports, nb_ports);
        num_ports = nb_ports;
    }

    for (portid = 0; portid < num_ports; portid++) {
        if (ports[portid] >= nb_ports) {
            RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
                ports[portid], (nb_ports - 1));
            ports[portid] = INVALID_PORT_ID;
            valid_num_ports--;
        }
    }
    return valid_num_ports;
}
static inline struct vhost_dev *__attribute__((always_inline))
find_vhost_dev(struct ether_addr *mac)
{
    struct vhost_dev *vdev;

    TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
        if (vdev->ready == DEVICE_RX &&
            is_same_ether_addr(mac, &vdev->mac_address))
            return vdev;
    }

    return NULL;
}
/*
 * This function learns the MAC address of the device and registers this
 * along with a vlan tag to a VMDQ.
 */
static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
    struct ether_hdr *pkt_hdr;
    int i, ret;

    /* Learn MAC address of guest device from packet */
    pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    if (find_vhost_dev(&pkt_hdr->s_addr)) {
        RTE_LOG(ERR, VHOST_DATA,
            "(%d) device is using a registered MAC!\n",
            vdev->vid);
        return -1;
    }

    for (i = 0; i < ETHER_ADDR_LEN; i++)
        vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];

    /* vlan_tag currently uses the device_id. */
    vdev->vlan_tag = vlan_tags[vdev->vid];

    /* Print out VMDQ registration info. */
    RTE_LOG(INFO, VHOST_DATA,
        "(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
        vdev->vid,
        vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
        vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
        vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
        vdev->vlan_tag);

    /* Register the MAC address. */
    ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
                (uint32_t)vdev->vid + vmdq_pool_base);
    if (ret)
        RTE_LOG(ERR, VHOST_DATA,
            "(%d) failed to add device MAC address to VMDQ\n",
            vdev->vid);

    /* Enable stripping of the vlan tag as we handle routing. */
    if (vlan_strip)
        rte_eth_dev_set_vlan_strip_on_queue(ports[0],
            (uint16_t)vdev->vmdq_rx_q, 1);

    /* Set device as ready for RX. */
    vdev->ready = DEVICE_RX;

    return 0;
}
/*
 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding
 * buffers to the RX queue before disabling RX on the device.
 */
static inline void
unlink_vmdq(struct vhost_dev *vdev)
{
    unsigned i = 0;
    unsigned rx_count;
    struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

    if (vdev->ready == DEVICE_RX) {
        /* Clear MAC and VLAN settings. */
        rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
        for (i = 0; i < 6; i++)
            vdev->mac_address.addr_bytes[i] = 0;

        vdev->vlan_tag = 0;

        /* Clear out the receive buffers. */
        rx_count = rte_eth_rx_burst(ports[0],
                (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

        while (rx_count) {
            for (i = 0; i < rx_count; i++)
                rte_pktmbuf_free(pkts_burst[i]);

            rx_count = rte_eth_rx_burst(ports[0],
                    (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
        }

        vdev->ready = DEVICE_MAC_LEARNING;
    }
}
static inline void __attribute__((always_inline))
virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
        struct rte_mbuf *m)
{
    uint16_t ret;

    ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
    if (enable_stats) {
        rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
        rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
        src_vdev->stats.tx_total++;
        src_vdev->stats.tx += ret;
    }
}
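/*
 * Note: rx_total/rx_atomic are atomic counters because a device's RX stats
 * can be bumped both by its own polling core (drain_eth_rx) and by any other
 * core forwarding VM2VM traffic to it through virtio_xmit() above.
 */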
/*
 * Check if the packet destination MAC address is for a local device. If so
 * then put the packet on that device's RX queue. If not then return.
 */
static inline int __attribute__((always_inline))
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
    struct ether_hdr *pkt_hdr;
    struct vhost_dev *dst_vdev;

    pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
    if (!dst_vdev)
        return -1;

    if (vdev->vid == dst_vdev->vid) {
        RTE_LOG(DEBUG, VHOST_DATA,
            "(%d) TX: src and dst MAC is same. Dropping packet.\n",
            vdev->vid);
        return 0;
    }

    RTE_LOG(DEBUG, VHOST_DATA,
        "(%d) TX: MAC address is local\n", dst_vdev->vid);

    if (unlikely(dst_vdev->remove)) {
        RTE_LOG(DEBUG, VHOST_DATA,
            "(%d) device is marked for removal\n", dst_vdev->vid);
        return 0;
    }

    virtio_xmit(dst_vdev, vdev, m);
    return 0;
}
/*
 * Check if the destination MAC of a packet belongs to a local VM; if it
 * does, fetch its vlan tag and the length offset.
 */
static inline int __attribute__((always_inline))
find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
    uint32_t *offset, uint16_t *vlan_tag)
{
    struct vhost_dev *dst_vdev;
    struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
    if (!dst_vdev)
        return 0;

    if (vdev->vid == dst_vdev->vid) {
        RTE_LOG(DEBUG, VHOST_DATA,
            "(%d) TX: src and dst MAC is same. Dropping packet.\n",
            vdev->vid);
        return -1;
    }

    /*
     * HW vlan strip will reduce the packet length by the length of the
     * vlan tag, so we need to restore the packet length by adding it
     * back.
     */
    *offset = VLAN_HLEN;
    *vlan_tag = vlan_tags[vdev->vid];

    RTE_LOG(DEBUG, VHOST_DATA,
        "(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
        vdev->vid, dst_vdev->vid, *vlan_tag);

    return 0;
}
static uint16_t
get_psd_sum(void *l3_hdr, uint64_t ol_flags)
{
    if (ol_flags & PKT_TX_IPV4)
        return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
    else /* assume ethertype == ETHER_TYPE_IPv6 */
        return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}
static void virtio_tx_offload(struct rte_mbuf *m)
{
    void *l3_hdr;
    struct ipv4_hdr *ipv4_hdr = NULL;
    struct tcp_hdr *tcp_hdr = NULL;
    struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    l3_hdr = (char *)eth_hdr + m->l2_len;

    if (m->ol_flags & PKT_TX_IPV4) {
        ipv4_hdr = l3_hdr;
        ipv4_hdr->hdr_checksum = 0;
        m->ol_flags |= PKT_TX_IP_CKSUM;
    }

    tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
    tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}
static inline void
free_pkts(struct rte_mbuf **pkts, uint16_t n)
{
    while (n--)
        rte_pktmbuf_free(pkts[n]);
}
static inline void __attribute__((always_inline))
do_drain_mbuf_table(struct mbuf_table *tx_q)
{
    uint16_t count;

    count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
                 tx_q->m_table, tx_q->len);
    if (unlikely(count < tx_q->len))
        free_pkts(&tx_q->m_table[count], tx_q->len - count);

    tx_q->len = 0;
}
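/*
 * Note: packets the NIC did not accept in rte_eth_tx_burst() are freed, not
 * requeued; the drain path favours bounded latency over lossless TX.
 */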
/*
 * This function routes the TX packet to the correct interface. This
 * may be a local device or the physical port.
 */
static inline void __attribute__((always_inline))
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
    struct mbuf_table *tx_q;
    unsigned offset = 0;
    const uint16_t lcore_id = rte_lcore_id();
    struct ether_hdr *nh;

    nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
    if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
        struct vhost_dev *vdev2;

        TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
            virtio_xmit(vdev2, vdev, m);
        }
        goto queue2nic;
    }

    /* Check if the destination is a local VM. */
    if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
        rte_pktmbuf_free(m);
        return;
    }

    if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
        if (unlikely(find_local_dest(vdev, m, &offset,
                         &vlan_tag) != 0)) {
            rte_pktmbuf_free(m);
            return;
        }
    }

    RTE_LOG(DEBUG, VHOST_DATA,
        "(%d) TX: MAC address is external\n", vdev->vid);

queue2nic:

    /* Add packet to the port tx queue. */
    tx_q = &lcore_tx_queue[lcore_id];

    nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
    if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
        /* Guest has inserted the vlan tag. */
        struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
        uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
        if ((vm2vm_mode == VM2VM_HARDWARE) &&
            (vh->vlan_tci != vlan_tag_be))
            vh->vlan_tci = vlan_tag_be;
    } else {
        m->ol_flags |= PKT_TX_VLAN_PKT;

        /*
         * Find the right seg to adjust the data len when offset is
         * bigger than tail room size.
         */
        if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
            if (likely(offset <= rte_pktmbuf_tailroom(m)))
                m->data_len += offset;
            else {
                struct rte_mbuf *seg = m;

                while ((seg->next != NULL) &&
                       (offset > rte_pktmbuf_tailroom(seg)))
                    seg = seg->next;

                seg->data_len += offset;
            }
            m->pkt_len += offset;
        }

        m->vlan_tci = vlan_tag;
    }

    if (m->ol_flags & PKT_TX_TCP_SEG)
        virtio_tx_offload(m);

    tx_q->m_table[tx_q->len++] = m;
    if (enable_stats) {
        vdev->stats.tx_total++;
        vdev->stats.tx++;
    }

    if (unlikely(tx_q->len == MAX_PKT_BURST))
        do_drain_mbuf_table(tx_q);
}
static inline void __attribute__((always_inline))
drain_mbuf_table(struct mbuf_table *tx_q)
{
    static uint64_t prev_tsc;
    uint64_t cur_tsc;

    if (tx_q->len == 0)
        return;

    cur_tsc = rte_rdtsc();
    if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
        prev_tsc = cur_tsc;

        RTE_LOG(DEBUG, VHOST_DATA,
            "TX queue drained after timeout with burst size %u\n",
            tx_q->len);
        do_drain_mbuf_table(tx_q);
    }
}
static inline void __attribute__((always_inline))
drain_eth_rx(struct vhost_dev *vdev)
{
    uint16_t rx_count, enqueue_count;
    struct rte_mbuf *pkts[MAX_PKT_BURST];

    rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
                    pkts, MAX_PKT_BURST);
    if (!rx_count)
        return;

    /*
     * When "enable_retry" is set, here we wait and retry when there
     * are not enough free slots in the queue to hold @rx_count
     * packets, to diminish packet loss.
     */
    if (enable_retry &&
        unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
            VIRTIO_RXQ))) {
        uint32_t retry;

        for (retry = 0; retry < burst_rx_retry_num; retry++) {
            rte_delay_us(burst_rx_delay_time);
            if (rx_count <= rte_vhost_avail_entries(vdev->vid,
                    VIRTIO_RXQ))
                break;
        }
    }

    enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
                        pkts, rx_count);
    if (enable_stats) {
        rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
        rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
    }

    free_pkts(pkts, rx_count);
}
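/*
 * Note: with the defaults above, a full vhost RX queue stalls this path for
 * at most BURST_RX_RETRIES * BURST_RX_WAIT_US = 4 * 15 = 60 microseconds
 * before the burst is enqueued anyway; packets that still do not fit are
 * dropped.
 */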
static inline void __attribute__((always_inline))
drain_virtio_tx(struct vhost_dev *vdev)
{
    struct rte_mbuf *pkts[MAX_PKT_BURST];
    uint16_t count;
    uint16_t i;

    count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, mbuf_pool,
                    pkts, MAX_PKT_BURST);

    /* setup VMDq for the first packet */
    if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
        if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1) {
            free_pkts(pkts, count);
            return; /* do not route packets that were just freed */
        }
    }

    for (i = 0; i < count; ++i)
        virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}
/*
 * Main function of vhost-switch. It basically does:
 *
 * for each vhost device {
 *    - drain_eth_rx()
 *
 *      Which drains the host eth Rx queue linked to the vhost device,
 *      and delivers all of the packets to the guest virtio Rx ring
 *      associated with this vhost device.
 *
 *    - drain_virtio_tx()
 *
 *      Which drains the guest virtio Tx queue and delivers all of the
 *      packets to the target, which could be another vhost device, or
 *      the physical eth dev. The routing is done in "virtio_tx_route".
 * }
 */
static int
switch_worker(void *arg __rte_unused)
{
    unsigned i;
    unsigned lcore_id = rte_lcore_id();
    struct vhost_dev *vdev;
    struct mbuf_table *tx_q;

    RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);

    tx_q = &lcore_tx_queue[lcore_id];
    for (i = 0; i < rte_lcore_count(); i++) {
        if (lcore_ids[i] == lcore_id) {
            tx_q->txq_id = i;
            break;
        }
    }

    while (1) {
        drain_mbuf_table(tx_q);

        /*
         * Inform the configuration core that we have exited the
         * linked list and that no devices are in use if requested.
         */
        if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
            lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;

        /*
         * Process vhost devices
         */
        TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
                  lcore_vdev_entry) {
            if (unlikely(vdev->remove)) {
                unlink_vmdq(vdev);
                vdev->ready = DEVICE_SAFE_REMOVE;
                continue;
            }

            if (likely(vdev->ready == DEVICE_RX))
                drain_eth_rx(vdev);

            if (likely(!vdev->remove))
                drain_virtio_tx(vdev);
        }
    }

    return 0;
}
/*
 * Remove a device from the specific data core linked list and from the
 * main linked list. Synchronization occurs through the use of the
 * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
 * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
 */
static void
destroy_device(int vid)
{
    struct vhost_dev *vdev = NULL;
    int lcore;

    TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
        if (vdev->vid == vid)
            break;
    }
    if (!vdev)
        return;

    /* Set the remove flag. */
    vdev->remove = 1;
    while (vdev->ready != DEVICE_SAFE_REMOVE) {
        rte_pause();
    }

    TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
             lcore_vdev_entry);
    TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);

    /* Set the dev_removal_flag on each lcore. */
    RTE_LCORE_FOREACH_SLAVE(lcore)
        lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;

    /*
     * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
     * we can be sure that they can no longer access the device removed
     * from the linked lists and that the devices are no longer in use.
     */
    RTE_LCORE_FOREACH_SLAVE(lcore) {
        while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
            rte_pause();
    }

    lcore_info[vdev->coreid].device_num--;

    RTE_LOG(INFO, VHOST_DATA,
        "(%d) device has been removed from data core\n",
        vdev->vid);

    rte_free(vdev);
}
/*
 * A new device is added to a data core. First the device is added to the
 * main linked list and then allocated to a specific data core.
 */
static int
new_device(int vid)
{
    int lcore, core_add = 0;
    uint32_t device_num_min = num_devices;
    struct vhost_dev *vdev;

    vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
    if (vdev == NULL) {
        RTE_LOG(INFO, VHOST_DATA,
            "(%d) couldn't allocate memory for vhost dev\n",
            vid);
        return -1;
    }
    vdev->vid = vid;

    TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
    vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;

    /* Reset ready flag. */
    vdev->ready = DEVICE_MAC_LEARNING;
    vdev->remove = 0;

    /* Find a suitable lcore to add the device. */
    RTE_LCORE_FOREACH_SLAVE(lcore) {
        if (lcore_info[lcore].device_num < device_num_min) {
            device_num_min = lcore_info[lcore].device_num;
            core_add = lcore;
        }
    }
    vdev->coreid = core_add;

    TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
              lcore_vdev_entry);
    lcore_info[vdev->coreid].device_num++;

    /* Disable notifications. */
    rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
    rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);

    RTE_LOG(INFO, VHOST_DATA,
        "(%d) device has been added to data core %d\n",
        vid, vdev->coreid);

    return 0;
}
/*
 * These callbacks allow devices to be added to the data core when
 * configuration has fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
};
/*
 * This is a thread that wakes up periodically to print statistics if the
 * user has enabled them.
 */
static void
print_stats(void)
{
    struct vhost_dev *vdev;
    uint64_t tx_dropped, rx_dropped;
    uint64_t tx, tx_total, rx, rx_total;
    const char clr[] = { 27, '[', '2', 'J', '\0' };
    const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

    while (1) {
        sleep(enable_stats);

        /* Clear screen and move to top left */
        printf("%s%s\n", clr, top_left);
        printf("Device statistics =================================\n");

        TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
            tx_total   = vdev->stats.tx_total;
            tx         = vdev->stats.tx;
            tx_dropped = tx_total - tx;

            rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
            rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
            rx_dropped = rx_total - rx;

            printf("Statistics for device %d\n"
                "-----------------------\n"
                "TX total:       %" PRIu64 "\n"
                "TX dropped:     %" PRIu64 "\n"
                "TX successful:  %" PRIu64 "\n"
                "RX total:       %" PRIu64 "\n"
                "RX dropped:     %" PRIu64 "\n"
                "RX successful:  %" PRIu64 "\n",
                vdev->vid,
                tx_total, tx_dropped, tx,
                rx_total, rx_dropped, rx);
        }

        printf("===================================================\n");
    }
}
/* When we receive an INT signal, unregister the vhost driver */
static void
sigint_handler(__rte_unused int signum)
{
    /* Unregister vhost driver. */
    int ret = rte_vhost_driver_unregister((char *)&dev_basename);
    if (ret != 0)
        rte_exit(EXIT_FAILURE, "vhost driver unregister failure.\n");
    exit(0);
}
/*
 * While creating an mbuf pool, one key thing is to figure out how
 * many mbuf entries are enough for our use. FYI, here are some
 * guidelines:
 *
 * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
 *
 * - For each switch core (a CPU core that does the packet switching), we
 *   also need to reserve some mbufs for receiving the packets from the
 *   virtio Tx queue. How many is enough depends on the usage. It's
 *   normally a simple calculation like the following:
 *
 *       MAX_PKT_BURST * max packet size / mbuf size
 *
 *   So, we definitely need to allocate more mbufs when TSO is enabled.
 *
 * - Similarly, for each switching core, we should reserve @nr_rx_desc
 *   mbufs for receiving the packets from the physical NIC device.
 *
 * - We also need to make sure, for each switch core, that we have
 *   allocated enough mbufs to fill up the mbuf cache.
 */
static void
create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
    uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
{
    uint32_t nr_mbufs;
    uint32_t nr_mbufs_per_core;
    uint32_t mtu = 1500;

    if (mergeable)
        mtu = 9000;
    if (enable_tso)
        mtu = 64 * 1024;

    nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
            (mbuf_size - RTE_PKTMBUF_HEADROOM);
    nr_mbufs_per_core += nr_rx_desc;
    nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
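    /*
     * Worked example (assuming the default 2048-byte mbuf dataroom plus
     * 128-byte headroom, i.e. mbuf_size = 2176): with mtu = 1500,
     * (1500 + 2176) * 32 / (2176 - 128) = 57 mbufs for in-flight bursts,
     * plus nr_rx_desc = 1024 for the NIC queue, giving 1081 per core.
     */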
    nr_mbufs  = nr_queues * nr_rx_desc;
    nr_mbufs += nr_mbufs_per_core * nr_switch_core;
    nr_mbufs *= nr_port;

    mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
                        nr_mbuf_cache, 0, mbuf_size,
                        rte_socket_id());
    if (mbuf_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
}
/*
 * Main function, does initialisation and calls the per-lcore functions. The
 * CUSE device is also registered here to handle the IOCTLs.
 */
int
main(int argc, char *argv[])
{
    unsigned lcore_id, core_id = 0;
    unsigned nb_ports, valid_num_ports;
    int ret;
    uint8_t portid;
    static pthread_t tid;
    char thread_name[RTE_MAX_THREAD_NAME_LEN];
    uint64_t flags = 0;

    signal(SIGINT, sigint_handler);

    /* init EAL */
    ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
    argc -= ret;
    argv += ret;

    /* parse app arguments */
    ret = us_vhost_parse_args(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid argument\n");

    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
        TAILQ_INIT(&lcore_info[lcore_id].vdev_list);

        if (rte_lcore_is_enabled(lcore_id))
            lcore_ids[core_id++] = lcore_id;
    }

    if (rte_lcore_count() > RTE_MAX_LCORE)
        rte_exit(EXIT_FAILURE, "Not enough cores\n");

    /* Get the number of physical ports. */
    nb_ports = rte_eth_dev_count();

    /*
     * Update the global vars NUM_PORTS and PORTS, and get the value of
     * VALID_NUM_PORTS, according to the number of system ports.
     */
    valid_num_ports = check_ports_num(nb_ports);

    if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
        RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
            "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
        return -1;
    }

    /*
     * FIXME: here we are trying to allocate mbufs big enough for
     * @MAX_QUEUES, but the truth is we're never going to use that
     * many queues here. We probably should only do allocation for
     * those queues we are going to use.
     */
    create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
             MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);

    if (vm2vm_mode == VM2VM_HARDWARE) {
        /* Enable VT loop back to let the L2 switch do it. */
        vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
        RTE_LOG(DEBUG, VHOST_CONFIG,
            "Enable loop back for L2 switch in vmdq.\n");
    }

    /* initialize all ports */
    for (portid = 0; portid < nb_ports; portid++) {
        /* skip ports that are not enabled */
        if ((enabled_port_mask & (1 << portid)) == 0) {
            RTE_LOG(INFO, VHOST_PORT,
                "Skipping disabled port %d\n", portid);
            continue;
        }
        if (port_init(portid) != 0)
            rte_exit(EXIT_FAILURE,
                "Cannot initialize network ports\n");
    }

    /* Enable stats if the user option is set. */
    if (enable_stats) {
        ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
        if (ret != 0)
            rte_exit(EXIT_FAILURE,
                "Cannot create print-stats thread\n");

        /* Set thread_name for aid in debugging. */
        snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
        ret = rte_thread_setname(tid, thread_name);
        if (ret != 0)
            RTE_LOG(DEBUG, VHOST_CONFIG,
                "Cannot set print-stats name\n");
    }

    /* Launch all data cores. */
    RTE_LCORE_FOREACH_SLAVE(lcore_id)
        rte_eal_remote_launch(switch_worker, NULL, lcore_id);

    if (mergeable == 0)
        rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);

    if (client_mode)
        flags |= RTE_VHOST_USER_CLIENT;

    /* Register vhost(cuse or user) driver to handle vhost messages. */
    ret = rte_vhost_driver_register(dev_basename, flags);
    if (ret != 0)
        rte_exit(EXIT_FAILURE, "vhost driver register failure.\n");

    rte_vhost_driver_callback_register(&virtio_net_device_ops);

    /* Start CUSE session. */
    rte_vhost_driver_session_start();
    return 0;
}