/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <arpa/inet.h>
#include <errno.h>
#include <getopt.h>
#include <inttypes.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/param.h>
#include <unistd.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_virtio_net.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#include "main.h"

#define MAX_QUEUES 128

/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

#define MBUF_CACHE_SIZE 128
#define MBUF_DATA_SIZE  RTE_MBUF_DEFAULT_BUF_SIZE

#define MAX_PKT_BURST 32     /* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

#define BURST_RX_WAIT_US 15  /* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4   /* Number of retries on RX. */
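
/* 0x2600 = 9728 bytes, large enough for a 9000-byte jumbo frame plus headers. */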
#define JUMBO_FRAME_MAX_SIZE 0x2600

/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
#define DEVICE_RX           1
#define DEVICE_SAFE_REMOVE  2

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* Max number of devices. Limited by vmdq. */
#define MAX_DEVICES 64

/* Size of buffers used for snprintfs. */
#define MAX_PRINT_BUFF 6072

/* Maximum character device basename size. */
#define MAX_BASENAME_SZ 10

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64

/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Promiscuous mode */
static uint32_t promiscuous;

/* number of devices/queues to support */
static uint32_t num_queues = 0;
static uint32_t num_devices;

static struct rte_mempool *mbuf_pool;
static int mergeable;

/* Do vlan strip on host, enabled by default */
static uint32_t vlan_strip = 1;

/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
typedef enum {
    VM2VM_DISABLED = 0,
    VM2VM_SOFTWARE = 1,
    VM2VM_HARDWARE = 2,
    VM2VM_LAST
} vm2vm_type;
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;

/* Enable stats. */
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;

/* Disable TX checksum offload */
static uint32_t enable_tx_csum;

/* Disable TSO offload */
static uint32_t enable_tso;

static int client_mode;

/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;

/* Character device basename. Can be set by user. */
static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";

/* Empty VMDQ configuration structure. Filled in programmatically. */
static struct rte_eth_conf vmdq_conf_default = {
    .rxmode = {
        .mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
        .split_hdr_size = 0,
        .header_split   = 0, /**< Header Split disabled */
        .hw_ip_checksum = 0, /**< IP checksum offload disabled */
        .hw_vlan_filter = 0, /**< VLAN filtering disabled */
        /*
         * VLAN strip is necessary for 1G NICs such as I350;
         * without it, IPv4 forwarding in the guest cannot
         * forward packets from one virtio dev to another virtio dev.
         */
        .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
        .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
        .hw_strip_crc   = 0, /**< CRC stripped by hardware */
    },

    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
    .rx_adv_conf = {
        /*
         * These values should be overridden separately in code with
         * the appropriate values from the NIC.
         */
        .vmdq_rx_conf = {
            .nb_queue_pools = ETH_8_POOLS,
            .enable_default_pool = 0,
            .default_pool = 0,
            .nb_pool_maps = 0,
            .pool_map = {{0, 0},},
        },
    },
};

static unsigned lcore_ids[RTE_MAX_LCORE];
static uint8_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified on the command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;
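
/*
 * One VLAN tag per VMDQ pool/vhost device: vlan_tags[vid] is both the tag
 * registered with the NIC for that device's pool and the tag used when
 * routing the device's packets (see link_vmdq() and virtio_tx_route()).
 */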
const uint16_t vlan_tags[] = {
    1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
    1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
    1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
    1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
    1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
    1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
    1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
    1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};

/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

static struct vhost_dev_tailq_list vhost_dev_list =
    TAILQ_HEAD_INITIALIZER(vhost_dev_list);

static struct lcore_info lcore_info[RTE_MAX_LCORE];

/* Used for queueing bursts of TX packets. */
struct mbuf_table {
    unsigned len;
    unsigned txq_id;
    struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
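
/*
 * TX drain period expressed in TSC cycles: cycles-per-microsecond
 * (tsc_hz / US_PER_S, rounded up) multiplied by BURST_TX_DRAIN_US.
 */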
#define MBUF_TABLE_DRAIN_TSC ((rte_get_tsc_hz() + US_PER_S - 1) \
                              / US_PER_S * BURST_TX_DRAIN_US)

/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
    struct rte_eth_vmdq_rx_conf conf;
    struct rte_eth_vmdq_rx_conf *def_conf =
        &vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
    unsigned i;

    memset(&conf, 0, sizeof(conf));
    conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
    conf.nb_pool_maps = num_devices;
    conf.enable_loop_back = def_conf->enable_loop_back;
    conf.rx_mode = def_conf->rx_mode;
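
    /* Map one VLAN tag to each pool: packets tagged vlan_tags[i] are
     * steered to pool i (the pools field is a bitmask of target pools). */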
    for (i = 0; i < conf.nb_pool_maps; i++) {
        conf.pool_map[i].vlan_id = vlan_tags[i];
        conf.pool_map[i].pools = (1UL << i);
    }

    (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
    (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
        sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
    return 0;
}

/*
 * Validate the device number according to the max pool number obtained from
 * dev_info. If the device number is invalid, give the error message and
 * return -1. Each device must have its own pool.
 */
static int
validate_num_devices(uint32_t max_nb_devices)
{
    if (num_devices > max_nb_devices) {
        RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
        return -1;
    }
    return 0;
}

/*
 * Initialises a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as parameter.
 */
static inline int
port_init(uint8_t port)
{
    struct rte_eth_dev_info dev_info;
    struct rte_eth_conf port_conf;
    struct rte_eth_rxconf *rxconf;
    struct rte_eth_txconf *txconf;
    int16_t rx_rings, tx_rings;
    uint16_t rx_ring_size, tx_ring_size;
    int retval;
    uint16_t q;

    /* The max pool number from dev_info will be used to validate the pool number specified on the command line */
    rte_eth_dev_info_get(port, &dev_info);

    if (dev_info.max_rx_queues > MAX_QUEUES) {
        rte_exit(EXIT_FAILURE,
            "please define MAX_QUEUES no less than %u in %s\n",
            dev_info.max_rx_queues, __FILE__);
    }

    rxconf = &dev_info.default_rxconf;
    txconf = &dev_info.default_txconf;
    rxconf->rx_drop_en = 1;

    /* Enable vlan offload */
    txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;

    /* Configure the number of supported virtio devices based on VMDQ limits */
    num_devices = dev_info.max_vmdq_pools;

    rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
    tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
    tx_rings = (uint16_t)rte_lcore_count();

    retval = validate_num_devices(MAX_DEVICES);
    if (retval < 0)
        return retval;

    /* Get port configuration. */
    retval = get_eth_conf(&port_conf, num_devices);
    if (retval < 0)
        return retval;

    /* NIC queues are divided into pf queues and vmdq queues. */
    num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
    queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
    num_vmdq_queues = num_devices * queues_per_pool;
    num_queues = num_pf_queues + num_vmdq_queues;
    vmdq_queue_base = dev_info.vmdq_queue_base;
    vmdq_pool_base = dev_info.vmdq_pool_base;
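
    /*
     * Illustrative example (figures vary per NIC): with max_rx_queues =
     * 128, vmdq_queue_num = 128 and max_vmdq_pools = 64, this yields
     * num_pf_queues = 0, queues_per_pool = 2 and num_vmdq_queues = 128.
     */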
    printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
        num_pf_queues, num_devices, queues_per_pool);

    if (port >= rte_eth_dev_count())
        return -1;

    if (enable_tx_csum == 0)
        rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_CSUM);

    if (enable_tso == 0) {
        rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4);
        rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO6);
        rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_GUEST_TSO4);
        rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_GUEST_TSO6);
    }

    rx_rings = (uint16_t)dev_info.max_rx_queues;
    /* Configure ethernet device. */
    retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
    if (retval != 0) {
        RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
            port, strerror(-retval));
        return retval;
    }

    /* Setup the queues. */
    for (q = 0; q < rx_rings; q++) {
        retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
                rte_eth_dev_socket_id(port),
                rxconf,
                mbuf_pool);
        if (retval < 0) {
            RTE_LOG(ERR, VHOST_PORT,
                "Failed to setup rx queue %u of port %u: %s.\n",
                q, port, strerror(-retval));
            return retval;
        }
    }
    for (q = 0; q < tx_rings; q++) {
        retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
                rte_eth_dev_socket_id(port),
                txconf);
        if (retval < 0) {
            RTE_LOG(ERR, VHOST_PORT,
                "Failed to setup tx queue %u of port %u: %s.\n",
                q, port, strerror(-retval));
            return retval;
        }
    }

    /* Start the device. */
    retval = rte_eth_dev_start(port);
    if (retval < 0) {
        RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
            port, strerror(-retval));
        return retval;
    }

    if (promiscuous)
        rte_eth_promiscuous_enable(port);

    rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
    RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
    RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
        " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
        (unsigned)port,
        vmdq_ports_eth_addr[port].addr_bytes[0],
        vmdq_ports_eth_addr[port].addr_bytes[1],
        vmdq_ports_eth_addr[port].addr_bytes[2],
        vmdq_ports_eth_addr[port].addr_bytes[3],
        vmdq_ports_eth_addr[port].addr_bytes[4],
        vmdq_ports_eth_addr[port].addr_bytes[5]);

    return 0;
}

/*
 * Set character device basename.
 */
static int
us_vhost_parse_basename(const char *q_arg)
{
    /* Reject names that would not fit (including the terminating NUL). */
    if (strnlen(q_arg, MAX_BASENAME_SZ) >= MAX_BASENAME_SZ)
        return -1;

    snprintf((char *)&dev_basename, MAX_BASENAME_SZ, "%s", q_arg);

    return 0;
}

/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
    char *end = NULL;
    unsigned long pm;

    errno = 0;

    /* parse hexadecimal string */
    pm = strtoul(portmask, &end, 16);
    if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
        return -1;

    if (pm == 0)
        return -1;

    return pm;
}

/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
    char *end = NULL;
    unsigned long num;

    errno = 0;

    /* parse unsigned int string */
    num = strtoul(q_arg, &end, 10);
    if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
        return -1;

    if (num > max_valid_value)
        return -1;

    return num;
}

/*
 * Display usage
 */
static void
us_vhost_usage(const char *prgname)
{
    RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
    "    --vm2vm [0|1|2]\n"
    "    --rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
    "    --dev-basename <name>\n"
    "    --tx-csum [0|1] --tso [0|1]\n"
    "    -p PORTMASK: Set mask for ports to be used by application\n"
    "    --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
    "    --rx-retry [0|1]: disable/enable(default) retries on RX. Enable retry if destination queue is full\n"
    "    --rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. Takes effect only when retries on RX are enabled\n"
    "    --rx-retry-num [0-N]: the number of retries on RX. Takes effect only when retries on RX are enabled\n"
    "    --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
    "    --vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n"
    "    --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
    "    --dev-basename: The basename to be used for the character device.\n"
    "    --tx-csum [0|1] disable/enable TX checksum offload.\n"
    "    --tso [0|1] disable/enable TCP segmentation offload.\n"
    "    --client register a vhost-user socket as client mode.\n",
           prgname);
}
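
/*
 * Illustrative invocation (EAL flags and paths depend on the local setup):
 *
 *     ./vhost-switch -c f -n 4 -- -p 0x1 --dev-basename usvhost
 *
 * This runs the switch on lcores 0-3, enables physical port 0 and, when
 * built with vhost-user, creates a socket named "usvhost".
 */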
/*
 * Parse the arguments given in the command line of the application.
 */
static int
us_vhost_parse_args(int argc, char **argv)
{
    int opt, ret;
    int option_index;
    unsigned i;
    const char *prgname = argv[0];
    static struct option long_option[] = {
        {"vm2vm", required_argument, NULL, 0},
        {"rx-retry", required_argument, NULL, 0},
        {"rx-retry-delay", required_argument, NULL, 0},
        {"rx-retry-num", required_argument, NULL, 0},
        {"mergeable", required_argument, NULL, 0},
        {"vlan-strip", required_argument, NULL, 0},
        {"stats", required_argument, NULL, 0},
        {"dev-basename", required_argument, NULL, 0},
        {"tx-csum", required_argument, NULL, 0},
        {"tso", required_argument, NULL, 0},
        {"client", no_argument, &client_mode, 1},
        {NULL, 0, 0, 0},
    };

    /* Parse command line */
    while ((opt = getopt_long(argc, argv, "p:P",
            long_option, &option_index)) != EOF) {
        switch (opt) {
        /* Portmask */
        case 'p':
            enabled_port_mask = parse_portmask(optarg);
            if (enabled_port_mask == 0) {
                RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
                us_vhost_usage(prgname);
                return -1;
            }
            break;

        case 'P':
            promiscuous = 1;
            vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
                ETH_VMDQ_ACCEPT_BROADCAST |
                ETH_VMDQ_ACCEPT_MULTICAST;
            rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX);
            break;

        case 0:
            /* Enable/disable vm2vm comms. */
            if (!strncmp(long_option[option_index].name, "vm2vm",
                    MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for "
                        "vm2vm [0|1|2]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                vm2vm_mode = (vm2vm_type)ret;
            }

            /* Enable/disable retries on RX. */
            if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                enable_retry = ret;
            }

            /* Enable/disable TX checksum offload. */
            if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                enable_tx_csum = ret;
            }

            /* Enable/disable TSO offload. */
            if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                enable_tso = ret;
            }

            /* Specify the retry delay time (in microseconds) on RX. */
            if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, INT32_MAX);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                burst_rx_delay_time = ret;
            }

            /* Specify the number of retries on RX. */
            if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, INT32_MAX);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                burst_rx_retry_num = ret;
            }

            /* Enable/disable RX mergeable buffers. */
            if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                mergeable = !!ret;
                if (ret) {
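                    /*
                     * Mergeable buffers let the guest chain RX
                     * descriptors, so the host can accept frames
                     * larger than a single mbuf: enable jumbo frames.
                     */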
                    vmdq_conf_default.rxmode.jumbo_frame = 1;
                    vmdq_conf_default.rxmode.max_rx_pkt_len
                        = JUMBO_FRAME_MAX_SIZE;
                }
            }

            /* Enable/disable RX VLAN strip on host. */
            if (!strncmp(long_option[option_index].name,
                    "vlan-strip", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for VLAN strip [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                vlan_strip = !!ret;
                vmdq_conf_default.rxmode.hw_vlan_strip =
                    vlan_strip;
            }

            /* Enable/disable stats. */
            if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, INT32_MAX);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                enable_stats = ret;
            }

            /* Set character device basename. */
            if (!strncmp(long_option[option_index].name, "dev-basename", MAX_LONG_OPT_SZ)) {
                if (us_vhost_parse_basename(optarg) == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for character device basename (Max %d characters)\n", MAX_BASENAME_SZ);
                    us_vhost_usage(prgname);
                    return -1;
                }
            }
            break;

        /* Invalid option - print options. */
        default:
            us_vhost_usage(prgname);
            return -1;
        }
    }

    for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
        if (enabled_port_mask & (1 << i))
            ports[num_ports++] = (uint8_t)i;
    }

    if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
        RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
            "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
        return -1;
    }

    return 0;
}

/*
 * Update the global var NUM_PORTS and array PORTS according to system ports number
 * and return valid ports number.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
    unsigned valid_num_ports = num_ports;
    unsigned portid;

    if (num_ports > nb_ports) {
        RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
            num_ports, nb_ports);
        num_ports = nb_ports;
    }

    for (portid = 0; portid < num_ports; portid++) {
        if (ports[portid] >= nb_ports) {
            RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
                ports[portid], (nb_ports - 1));
            ports[portid] = INVALID_PORT_ID;
            valid_num_ports--;
        }
    }
    return valid_num_ports;
}

static inline struct vhost_dev *__attribute__((always_inline))
find_vhost_dev(struct ether_addr *mac)
{
    struct vhost_dev *vdev;

    TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
        if (vdev->ready == DEVICE_RX &&
            is_same_ether_addr(mac, &vdev->mac_address))
            return vdev;
    }

    return NULL;
}

/*
 * This function learns the MAC address of the device and registers this along with a
 * vlan tag to a VMDQ.
 */
static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
    struct ether_hdr *pkt_hdr;
    int i, ret;

    /* Learn MAC address of guest device from packet */
    pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    if (find_vhost_dev(&pkt_hdr->s_addr)) {
        RTE_LOG(ERR, VHOST_DATA,
            "(%d) device is using a registered MAC!\n",
            vdev->vid);
        return -1;
    }

    for (i = 0; i < ETHER_ADDR_LEN; i++)
        vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];

    /* vlan_tag currently uses the device_id. */
    vdev->vlan_tag = vlan_tags[vdev->vid];

    /* Print out VMDQ registration info. */
    RTE_LOG(INFO, VHOST_DATA,
        "(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
        vdev->vid,
        vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
        vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
        vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
        vdev->vlan_tag);

    /* Register the MAC address. */
    ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
        (uint32_t)vdev->vid + vmdq_pool_base);
    if (ret)
        RTE_LOG(ERR, VHOST_DATA,
            "(%d) failed to add device MAC address to VMDQ\n",
            vdev->vid);

    /* Enable stripping of the vlan tag as we handle routing. */
    if (vlan_strip)
        rte_eth_dev_set_vlan_strip_on_queue(ports[0],
            (uint16_t)vdev->vmdq_rx_q, 1);

    /* Set device as ready for RX. */
    vdev->ready = DEVICE_RX;

    return 0;
}

/*
 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
 * queue before disabling RX on the device.
 */
static inline void
unlink_vmdq(struct vhost_dev *vdev)
{
    unsigned i = 0;
    unsigned rx_count;
    struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

    if (vdev->ready == DEVICE_RX) {
        /* Clear MAC and VLAN settings */
        rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
        for (i = 0; i < ETHER_ADDR_LEN; i++)
            vdev->mac_address.addr_bytes[i] = 0;

        vdev->vlan_tag = 0;

        /* Clear out the receive buffers */
        rx_count = rte_eth_rx_burst(ports[0],
            (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

        while (rx_count) {
            for (i = 0; i < rx_count; i++)
                rte_pktmbuf_free(pkts_burst[i]);

            rx_count = rte_eth_rx_burst(ports[0],
                (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
        }

        vdev->ready = DEVICE_MAC_LEARNING;
    }
}

static inline void __attribute__((always_inline))
virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
        struct rte_mbuf *m)
{
    uint16_t ret;

    ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
    if (enable_stats) {
        rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
        rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
        src_vdev->stats.tx_total++;
        src_vdev->stats.tx += ret;
    }
}

/*
 * Check if the packet destination MAC address is for a local device. If so then put
 * the packet on that device's RX queue. If not then return.
 */
static inline int __attribute__((always_inline))
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
    struct ether_hdr *pkt_hdr;
    struct vhost_dev *dst_vdev;

    pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
    if (!dst_vdev)
        return -1;

    if (vdev->vid == dst_vdev->vid) {
        RTE_LOG(DEBUG, VHOST_DATA,
            "(%d) TX: src and dst MAC are the same. Dropping packet.\n",
            vdev->vid);
        return 0;
    }

    RTE_LOG(DEBUG, VHOST_DATA,
        "(%d) TX: MAC address is local\n", dst_vdev->vid);

    if (unlikely(dst_vdev->remove)) {
        RTE_LOG(DEBUG, VHOST_DATA,
            "(%d) device is marked for removal\n", dst_vdev->vid);
        return 0;
    }

    virtio_xmit(dst_vdev, vdev, m);
    return 0;
}

/*
 * Check if the destination MAC of a packet is one local VM,
 * and get its vlan tag, and offset if it is.
 */
static inline int __attribute__((always_inline))
find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
    uint32_t *offset, uint16_t *vlan_tag)
{
    struct vhost_dev *dst_vdev;
    struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
    if (!dst_vdev)
        return 0;

    if (vdev->vid == dst_vdev->vid) {
        RTE_LOG(DEBUG, VHOST_DATA,
            "(%d) TX: src and dst MAC are the same. Dropping packet.\n",
            vdev->vid);
        return -1;
    }

    /*
     * Hardware VLAN strip shrinks the packet by the length of the
     * VLAN tag, so the packet length has to be restored by adding
     * the tag length back.
     */
    *offset = VLAN_HLEN;
    *vlan_tag = vlan_tags[vdev->vid];

    RTE_LOG(DEBUG, VHOST_DATA,
        "(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
        vdev->vid, dst_vdev->vid, *vlan_tag);

    return 0;
}

static uint16_t
get_psd_sum(void *l3_hdr, uint64_t ol_flags)
{
    if (ol_flags & PKT_TX_IPV4)
        return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
    else /* assume ethertype == ETHER_TYPE_IPv6 */
        return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}

static void virtio_tx_offload(struct rte_mbuf *m)
{
    void *l3_hdr;
    struct ipv4_hdr *ipv4_hdr = NULL;
    struct tcp_hdr *tcp_hdr = NULL;
    struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    l3_hdr = (char *)eth_hdr + m->l2_len;

    if (m->ol_flags & PKT_TX_IPV4) {
        ipv4_hdr = l3_hdr;
        ipv4_hdr->hdr_checksum = 0;
        m->ol_flags |= PKT_TX_IP_CKSUM;
    }

    tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
    tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}

static inline void
free_pkts(struct rte_mbuf **pkts, uint16_t n)
{
    while (n--)
        rte_pktmbuf_free(pkts[n]);
}

static inline void __attribute__((always_inline))
do_drain_mbuf_table(struct mbuf_table *tx_q)
{
    uint16_t count;

    count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
                 tx_q->m_table, tx_q->len);
    if (unlikely(count < tx_q->len))
        free_pkts(&tx_q->m_table[count], tx_q->len - count);

    tx_q->len = 0;
}

/*
 * This function routes the TX packet to the correct interface. This
 * may be a local device or the physical port.
 */
static inline void __attribute__((always_inline))
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
    struct mbuf_table *tx_q;
    uint32_t offset = 0;
    const uint16_t lcore_id = rte_lcore_id();
    struct ether_hdr *nh;

    nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
    if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
        struct vhost_dev *vdev2;

        TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
            virtio_xmit(vdev2, vdev, m);
        }
        goto queue2nic;
    }

    /* Check if destination is local VM */
    if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
        rte_pktmbuf_free(m);
        return;
    }

    if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
        if (unlikely(find_local_dest(vdev, m, &offset,
                         &vlan_tag) != 0)) {
            rte_pktmbuf_free(m);
            return;
        }
    }

    RTE_LOG(DEBUG, VHOST_DATA,
        "(%d) TX: MAC address is external\n", vdev->vid);

queue2nic:

    /* Add packet to the port tx queue */
    tx_q = &lcore_tx_queue[lcore_id];

    nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
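    /*
     * If the guest already inserted an 802.1Q header, only rewrite the
     * TCI when doing hardware VM2VM switching; otherwise ask the NIC to
     * insert the VLAN tag on transmit (PKT_TX_VLAN_PKT).
     */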
    if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
        /* Guest has inserted the vlan tag. */
        struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
        uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
        if ((vm2vm_mode == VM2VM_HARDWARE) &&
            (vh->vlan_tci != vlan_tag_be))
            vh->vlan_tci = vlan_tag_be;
    } else {
        m->ol_flags |= PKT_TX_VLAN_PKT;

        /*
         * Find the right seg to adjust the data len when offset is
         * bigger than tail room size.
         */
        if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
            if (likely(offset <= rte_pktmbuf_tailroom(m)))
                m->data_len += offset;
            else {
                struct rte_mbuf *seg = m;

                while ((seg->next != NULL) &&
                    (offset > rte_pktmbuf_tailroom(seg)))
                    seg = seg->next;

                seg->data_len += offset;
            }
            m->pkt_len += offset;
        }

        m->vlan_tci = vlan_tag;
    }

    if (m->ol_flags & PKT_TX_TCP_SEG)
        virtio_tx_offload(m);

    tx_q->m_table[tx_q->len++] = m;
    if (enable_stats) {
        vdev->stats.tx_total++;
        vdev->stats.tx++;
    }

    if (unlikely(tx_q->len == MAX_PKT_BURST))
        do_drain_mbuf_table(tx_q);
}

static inline void __attribute__((always_inline))
drain_mbuf_table(struct mbuf_table *tx_q)
{
    static uint64_t prev_tsc;
    uint64_t cur_tsc;

    if (tx_q->len == 0)
        return;

    cur_tsc = rte_rdtsc();
    if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
        prev_tsc = cur_tsc;

        RTE_LOG(DEBUG, VHOST_DATA,
            "TX queue drained after timeout with burst size %u\n",
            tx_q->len);
        do_drain_mbuf_table(tx_q);
    }
}

static inline void __attribute__((always_inline))
drain_eth_rx(struct vhost_dev *vdev)
{
    uint16_t rx_count, enqueue_count;
    struct rte_mbuf *pkts[MAX_PKT_BURST];

    rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
                    pkts, MAX_PKT_BURST);
    if (!rx_count)
        return;

    /*
     * When "enable_retry" is set, we wait and retry when there are
     * not enough free slots in the queue to hold @rx_count packets,
     * to diminish packet loss.
     */
    if (enable_retry &&
        unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
            VIRTIO_RXQ))) {
        uint32_t retry;

        for (retry = 0; retry < burst_rx_retry_num; retry++) {
            rte_delay_us(burst_rx_delay_time);
            if (rx_count <= rte_vhost_avail_entries(vdev->vid,
                    VIRTIO_RXQ))
                break;
        }
    }

    enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
                        pkts, rx_count);
    if (enable_stats) {
        rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
        rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
    }
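
    /*
     * rte_vhost_enqueue_burst() copies packets into guest buffers, so
     * all host mbufs can be freed here, including any the guest had no
     * room for (those are simply dropped).
     */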
    free_pkts(pkts, rx_count);
}

static inline void __attribute__((always_inline))
drain_virtio_tx(struct vhost_dev *vdev)
{
    struct rte_mbuf *pkts[MAX_PKT_BURST];
    uint16_t count;
    uint16_t i;

    count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, mbuf_pool,
                    pkts, MAX_PKT_BURST);

    /* setup VMDq for the first packet */
    if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
        if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1) {
            free_pkts(pkts, count);
            return;
        }
    }

    for (i = 0; i < count; ++i)
        virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}

/*
 * Main function of vhost-switch. It basically does:
 *
 * for each vhost device {
 *    - drain_eth_rx()
 *
 *      Which drains the host eth RX queue linked to the vhost device,
 *      and delivers all of the packets to the guest virtio RX ring
 *      associated with this vhost device.
 *
 *    - drain_virtio_tx()
 *
 *      Which drains the guest virtio TX queue and delivers all of the
 *      packets to the target, which could be another vhost device, or
 *      the physical eth dev. The routing is done in virtio_tx_route().
 * }
 */
static int
switch_worker(void *arg __rte_unused)
{
    unsigned i;
    unsigned lcore_id = rte_lcore_id();
    struct vhost_dev *vdev;
    struct mbuf_table *tx_q;

    RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);

    tx_q = &lcore_tx_queue[lcore_id];
    for (i = 0; i < rte_lcore_count(); i++) {
        if (lcore_ids[i] == lcore_id) {
            tx_q->txq_id = i;
            break;
        }
    }

    while (1) {
        drain_mbuf_table(tx_q);

        /*
         * Inform the configuration core that we have exited the
         * linked list and that no devices are in use if requested.
         */
        if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
            lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;

        /*
         * Process vhost devices
         */
        TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
                  lcore_vdev_entry) {
            if (unlikely(vdev->remove)) {
                unlink_vmdq(vdev);
                vdev->ready = DEVICE_SAFE_REMOVE;
                continue;
            }

            if (likely(vdev->ready == DEVICE_RX))
                drain_eth_rx(vdev);

            if (likely(!vdev->remove))
                drain_virtio_tx(vdev);
        }
    }

    return 0;
}

/*
 * Remove a device from the specific data core linked list and from the
 * main linked list. Synchronization occurs through the use of the
 * lcore dev_removal_flag. The device is made volatile here to avoid
 * re-ordering of dev->remove=1, which can cause an infinite loop in the
 * rte_pause loop.
 */
static void
destroy_device(int vid)
{
    struct vhost_dev *vdev = NULL;
    int lcore;

    TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
        if (vdev->vid == vid)
            break;
    }
    if (!vdev)
        return;

    /* Set the remove flag. */
    vdev->remove = 1;
    while (vdev->ready != DEVICE_SAFE_REMOVE) {
        rte_pause();
    }

    TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
             lcore_vdev_entry);
    TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);

    /* Set the dev_removal_flag on each lcore. */
    RTE_LCORE_FOREACH_SLAVE(lcore)
        lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;

    /*
     * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
     * we can be sure that they can no longer access the device removed
     * from the linked lists and that the devices are no longer in use.
     */
    RTE_LCORE_FOREACH_SLAVE(lcore) {
        while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
            rte_pause();
    }

    lcore_info[vdev->coreid].device_num--;

    RTE_LOG(INFO, VHOST_DATA,
        "(%d) device has been removed from data core\n",
        vdev->vid);

    rte_free(vdev);
}

/*
 * A new device is added to a data core. First the device is added to the main linked list
 * and then allocated to a specific data core.
 */
static int
new_device(int vid)
{
    int lcore, core_add = 0;
    uint32_t device_num_min = num_devices;
    struct vhost_dev *vdev;

    vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
    if (vdev == NULL) {
        RTE_LOG(INFO, VHOST_DATA,
            "(%d) couldn't allocate memory for vhost dev\n",
            vid);
        return -1;
    }
    vdev->vid = vid;

    TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
    vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
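    /* Each device reads from the first RX queue of its own VMDq pool:
     * queue index = pool (== vid) * queues_per_pool + vmdq_queue_base. */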

    /* Reset ready flag */
    vdev->ready = DEVICE_MAC_LEARNING;
    vdev->remove = 0;

    /* Find a suitable lcore to add the device. */
    RTE_LCORE_FOREACH_SLAVE(lcore) {
        if (lcore_info[lcore].device_num < device_num_min) {
            device_num_min = lcore_info[lcore].device_num;
            core_add = lcore;
        }
    }
    vdev->coreid = core_add;

    TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
              lcore_vdev_entry);
    lcore_info[vdev->coreid].device_num++;

    /* Disable notifications. */
    rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
    rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);

    RTE_LOG(INFO, VHOST_DATA,
        "(%d) device has been added to data core %d\n",
        vid, vdev->coreid);

    return 0;
}

/*
 * These callbacks allow devices to be added to the data core when
 * configuration has fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
};

/*
 * This is a thread that wakes up periodically to print stats if the user has
 * enabled them.
 */
static void
print_stats(void)
{
    struct vhost_dev *vdev;
    uint64_t tx_dropped, rx_dropped;
    uint64_t tx, tx_total, rx, rx_total;
    const char clr[] = { 27, '[', '2', 'J', '\0' };
    const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

    while (1) {
        sleep(enable_stats);

        /* Clear screen and move to top left */
        printf("%s%s\n", clr, top_left);
        printf("Device statistics =================================\n");

        TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
            tx_total   = vdev->stats.tx_total;
            tx         = vdev->stats.tx;
            tx_dropped = tx_total - tx;

            rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
            rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
            rx_dropped = rx_total - rx;

            printf("Statistics for device %d\n"
                "-----------------------\n"
                "TX total:       %" PRIu64 "\n"
                "TX dropped:     %" PRIu64 "\n"
                "TX successful:  %" PRIu64 "\n"
                "RX total:       %" PRIu64 "\n"
                "RX dropped:     %" PRIu64 "\n"
                "RX successful:  %" PRIu64 "\n",
                vdev->vid,
                tx_total, tx_dropped, tx,
                rx_total, rx_dropped, rx);
        }

        printf("===================================================\n");
    }
}

/* When we receive an INT signal, unregister the vhost driver */
static void
sigint_handler(__rte_unused int signum)
{
    /* Unregister vhost driver. */
    int ret = rte_vhost_driver_unregister((char *)&dev_basename);
    if (ret != 0)
        rte_exit(EXIT_FAILURE, "vhost driver unregister failure.\n");
    exit(0);
}

/*
 * While creating an mbuf pool, one key thing is to figure out how
 * many mbuf entries are enough for our use. FYI, here are some
 * guidelines:
 *
 * - Each RX queue reserves @nr_rx_desc mbufs at queue setup stage.
 *
 * - For each switch core (a CPU core that does the packet switching),
 *   we also need to reserve some mbufs for receiving the packets from
 *   the virtio TX queue. How many is enough depends on the usage. It's
 *   normally a simple calculation like the following:
 *
 *       MAX_PKT_BURST * max packet size / mbuf size
 *
 *   So, we definitely need to allocate more mbufs when TSO is enabled.
 *
 * - Similarly, for each switching core, we need @nr_rx_desc mbufs for
 *   receiving the packets from the physical NIC device.
 *
 * - We also need to make sure that, for each switch core, we have
 *   allocated enough mbufs to fill up the mbuf cache.
 */
static void
create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
    uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
{
    uint32_t nr_mbufs;
    uint32_t nr_mbufs_per_core;
    uint32_t mtu = 1500;

    if (mergeable)
        mtu = 9000;
    if (enable_tso)
        mtu = 64 * 1024;

    nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST /
            (mbuf_size - RTE_PKTMBUF_HEADROOM);
    nr_mbufs_per_core += nr_rx_desc;
    nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
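
    /*
     * Worked example with the defaults (mtu 1500, 2176-byte mbufs,
     * 2048 bytes of data room after the 128-byte headroom, 1024 RX
     * descriptors): (1500 + 2176) * 32 / 2048 = 57 mbufs per burst,
     * plus 1024 for the RX ring, i.e. ~1081 mbufs per switch core.
     */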
    nr_mbufs  = nr_queues * nr_rx_desc;
    nr_mbufs += nr_mbufs_per_core * nr_switch_core;
    nr_mbufs *= nr_port;

    mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
                        nr_mbuf_cache, 0, mbuf_size,
                        rte_socket_id());
    if (mbuf_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
}

/*
 * Main function, does initialisation and calls the per-lcore functions. The CUSE
 * device is also registered here to handle the IOCTLs.
 */
int
main(int argc, char *argv[])
{
    unsigned lcore_id, core_id = 0;
    unsigned nb_ports, valid_num_ports;
    int ret;
    uint8_t portid;
    static pthread_t tid;
    char thread_name[RTE_MAX_THREAD_NAME_LEN];
    uint64_t flags = 0;

    signal(SIGINT, sigint_handler);

    /* init EAL */
    ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
    argc -= ret;
    argv += ret;

    /* parse app arguments */
    ret = us_vhost_parse_args(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid argument\n");

    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
        TAILQ_INIT(&lcore_info[lcore_id].vdev_list);

        if (rte_lcore_is_enabled(lcore_id))
            lcore_ids[core_id++] = lcore_id;
    }

    if (rte_lcore_count() > RTE_MAX_LCORE)
        rte_exit(EXIT_FAILURE, "Not enough cores\n");

    /* Get the number of physical ports. */
    nb_ports = rte_eth_dev_count();

    /*
     * Update the global var NUM_PORTS and global array PORTS
     * and get value of var VALID_NUM_PORTS according to system ports number.
     */
    valid_num_ports = check_ports_num(nb_ports);

    if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
        RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
            "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
        return -1;
    }

    /*
     * FIXME: here we are trying to allocate mbufs big enough for
     * @MAX_QUEUES, but the truth is we're never going to use that
     * many queues here. We probably should only do allocation for
     * those queues we are going to use.
     */
    create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
             MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);

    if (vm2vm_mode == VM2VM_HARDWARE) {
        /* Enable VT loop back to let L2 switch to do it. */
        vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
        RTE_LOG(DEBUG, VHOST_CONFIG,
            "Enable loop back for L2 switch in vmdq.\n");
    }

    /* initialize all ports */
    for (portid = 0; portid < nb_ports; portid++) {
        /* skip ports that are not enabled */
        if ((enabled_port_mask & (1 << portid)) == 0) {
            RTE_LOG(INFO, VHOST_PORT,
                "Skipping disabled port %d\n", portid);
            continue;
        }
        if (port_init(portid) != 0)
            rte_exit(EXIT_FAILURE,
                "Cannot initialize network ports\n");
    }

    /* Enable stats if the user option is set. */
    if (enable_stats) {
        ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
        if (ret != 0)
            rte_exit(EXIT_FAILURE,
                "Cannot create print-stats thread\n");

        /* Set thread_name for aid in debugging. */
        snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
        ret = rte_thread_setname(tid, thread_name);
        if (ret != 0)
            RTE_LOG(DEBUG, VHOST_CONFIG,
                "Cannot set print-stats name\n");
    }

    /* Launch all data cores. */
    RTE_LCORE_FOREACH_SLAVE(lcore_id)
        rte_eal_remote_launch(switch_worker, NULL, lcore_id);
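
    /*
     * When mergeable RX buffers were disabled on the command line, mask
     * the feature so guests do not negotiate VIRTIO_NET_F_MRG_RXBUF.
     */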
    if (mergeable == 0)
        rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);

    if (client_mode)
        flags |= RTE_VHOST_USER_CLIENT;

    /* Register vhost(cuse or user) driver to handle vhost messages. */
    ret = rte_vhost_driver_register(dev_basename, flags);
    if (ret != 0)
        rte_exit(EXIT_FAILURE, "vhost driver register failure.\n");

    rte_vhost_driver_callback_register(&virtio_net_device_ops);

    /* Start CUSE session. */
    rte_vhost_driver_session_start();
    return 0;
}