/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <arpa/inet.h>
#include <errno.h>
#include <getopt.h>
#include <inttypes.h>
#include <limits.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/param.h>
#include <unistd.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_vhost.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_pause.h>

#include "main.h"
#define MAX_QUEUES 128

/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

#define MBUF_CACHE_SIZE 128
#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE

#define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */

#define BURST_RX_WAIT_US 15	/* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4	/* Number of retries on RX. */

#define JUMBO_FRAME_MAX_SIZE 0x2600

/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
#define DEVICE_RX 1
#define DEVICE_SAFE_REMOVE 2

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* Max number of devices. Limited by vmdq. */
#define MAX_DEVICES 64

/* Size of buffers used for snprintfs. */
#define MAX_PRINT_BUFF 6072

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Promiscuous mode */
static uint32_t promiscuous;

/* number of devices/queues to support */
static uint32_t num_queues = 0;
static uint32_t num_devices;

static struct rte_mempool *mbuf_pool;
static int mergeable;

/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
typedef enum {
	VM2VM_DISABLED = 0,
	VM2VM_SOFTWARE = 1,
	VM2VM_HARDWARE = 2,
	VM2VM_LAST
} vm2vm_type;
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;

/* Enable stats. */
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;

/* Disable TX checksum offload */
static uint32_t enable_tx_csum;

/* Disable TSO offload */
static uint32_t enable_tso;

static int client_mode;
static int dequeue_zero_copy;

static int builtin_net_driver;

/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;

/* Socket file paths. Can be set by user. */
static char *socket_files;
static int nb_sockets;
/* empty vmdq configuration structure. Filled in programmatically */
static struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		/*
		 * VLAN strip is necessary for 1G NICs such as I350; it fixes
		 * a bug where IPv4 forwarding in the guest could not forward
		 * packets from one virtio dev to another virtio dev.
		 */
		.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 1, /**< CRC stripped by hardware */
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
	.rx_adv_conf = {
		/*
		 * should be overridden separately in code with
		 * appropriate values
		 */
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
};
static unsigned lcore_ids[RTE_MAX_LCORE];
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified in command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;

const uint16_t vlan_tags[] = {
	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
	1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};

/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

static struct vhost_dev_tailq_list vhost_dev_list =
	TAILQ_HEAD_INITIALIZER(vhost_dev_list);

static struct lcore_info lcore_info[RTE_MAX_LCORE];
/* Used for queueing bursts of TX packets. */
struct mbuf_table {
	unsigned len;
	unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];

#define MBUF_TABLE_DRAIN_TSC	((rte_get_tsc_hz() + US_PER_S - 1) \
				 / US_PER_S * BURST_TX_DRAIN_US)
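/*
 * Worked example (illustrative numbers, not measured): with a 2.5 GHz
 * TSC, rte_get_tsc_hz() returns 2500000000, so the macro evaluates to
 * ceil(2.5e9 / 1e6) * 100 = 2500 * 100 = 250000 cycles, i.e. the TX
 * queue is drained roughly every 100 microseconds even when it never
 * fills a complete burst.
 */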
/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
	struct rte_eth_vmdq_rx_conf conf;
	struct rte_eth_vmdq_rx_conf *def_conf =
		&vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
	unsigned i;

	memset(&conf, 0, sizeof(conf));
	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
	conf.nb_pool_maps = num_devices;
	conf.enable_loop_back = def_conf->enable_loop_back;
	conf.rx_mode = def_conf->rx_mode;
	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << i);
	}

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));

	return 0;
}
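/*
 * Example of the mapping built above, assuming num_devices == 8:
 *
 *   pool_map[0] = { .vlan_id = 1000, .pools = 0x01 }
 *   pool_map[1] = { .vlan_id = 1001, .pools = 0x02 }
 *   ...
 *   pool_map[7] = { .vlan_id = 1007, .pools = 0x80 }
 *
 * i.e. each VMDQ pool (and thus each vhost device) accepts exactly one
 * VLAN tag from vlan_tags[].
 */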
/*
 * Validate the device number according to the max pool number obtained
 * from dev_info. If the device number is invalid, give the error message
 * and return -1. Each device must have its own pool.
 */
static inline int
validate_num_devices(uint32_t max_nb_devices)
{
	if (num_devices > max_nb_devices) {
		RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
		return -1;
	}

	return 0;
}
/*
 * Initialises a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as parameter.
 */
static inline int
port_init(uint16_t port)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_txconf *txconf;
	int16_t rx_rings, tx_rings;
	uint16_t rx_ring_size, tx_ring_size;
	int retval;
	uint16_t q;

	/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
	rte_eth_dev_info_get(port, &dev_info);

	if (dev_info.max_rx_queues > MAX_QUEUES) {
		rte_exit(EXIT_FAILURE,
			"please define MAX_QUEUES no less than %u in %s\n",
			dev_info.max_rx_queues, __FILE__);
	}

	rxconf = &dev_info.default_rxconf;
	txconf = &dev_info.default_txconf;
	rxconf->rx_drop_en = 1;

	/* Enable vlan offload */
	txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;

	/* Configure the number of supported virtio devices based on VMDQ limits */
	num_devices = dev_info.max_vmdq_pools;

	rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
	tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
	/*
	 * When dequeue zero copy is enabled, the guest Tx used vring will be
	 * updated only when the corresponding mbuf is freed. Thus, the
	 * nb_tx_desc (tx_ring_size here) must be small enough so that the
	 * driver will hit the free threshold easily and free mbufs in time.
	 * Otherwise, the guest Tx vring would be starved.
	 */
	if (dequeue_zero_copy)
		tx_ring_size = 64;

	tx_rings = (uint16_t)rte_lcore_count();
	retval = validate_num_devices(MAX_DEVICES);
	if (retval < 0)
		return retval;

	/* Get port configuration. */
	retval = get_eth_conf(&port_conf, num_devices);
	if (retval < 0)
		return retval;
	/* NIC queues are divided into pf queues and vmdq queues. */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_devices * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base = dev_info.vmdq_pool_base;
	printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
		num_pf_queues, num_devices, queues_per_pool);
	if (port >= rte_eth_dev_count())
		return -1;

	rx_rings = (uint16_t)dev_info.max_rx_queues;
	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
			port, strerror(-retval));
		return retval;
	}

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
		&tx_ring_size);
	if (retval != 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
			"for port %u: %s.\n", port, strerror(-retval));
		return retval;
	}
	if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
		RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
			"for Rx queues on port %u.\n", port);
		return -1;
	}
	/* Setup the queues. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
						rte_eth_dev_socket_id(port),
						rxconf,
						mbuf_pool);
		if (retval < 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup rx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));
			return retval;
		}
	}
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
						rte_eth_dev_socket_id(port),
						txconf);
		if (retval < 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup tx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));
			return retval;
		}
	}
	/* Start the device. */
	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
			port, strerror(-retval));
		return retval;
	}

	if (promiscuous)
		rte_eth_promiscuous_enable(port);

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	return 0;
}
/*
 * Set socket file path.
 */
static int
us_vhost_parse_socket_path(const char *q_arg)
{
	/* parse socket path string */
	if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
		return -1;

	socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
	if (socket_files == NULL)
		return -1;
	snprintf(socket_files + nb_sockets * PATH_MAX, PATH_MAX, "%s", q_arg);
	nb_sockets++;

	return 0;
}
/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	errno = 0;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	return pm;
}
/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
	char *end = NULL;
	unsigned long num;

	errno = 0;

	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	if (num > max_valid_value)
		return -1;

	return num;
}
/*
 * Display usage
 */
static void
us_vhost_usage(const char *prgname)
{
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
	"		--vm2vm [0|1|2]\n"
	"		--rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
	"		--socket-file <path>\n"
	"		-p PORTMASK: Set mask for ports to be used by application\n"
	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
	"		--rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destination queue is full\n"
	"		--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. This only takes effect if retries on rx are enabled\n"
	"		--rx-retry-num [0-N]: the number of retries on rx. This only takes effect if retries on rx are enabled\n"
	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
	"		--socket-file: The path of the socket file.\n"
	"		--tx-csum [0|1]: disable/enable TX checksum offload.\n"
	"		--tso [0|1]: disable/enable TCP segment offload.\n"
	"		--client: register a vhost-user socket as client mode.\n"
	"		--dequeue-zero-copy: enables dequeue zero copy\n",
	       prgname);
}
/*
 * Parse the arguments given in the command line of the application.
 */
static int
us_vhost_parse_args(int argc, char **argv)
{
	int opt, ret;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"vm2vm", required_argument, NULL, 0},
		{"rx-retry", required_argument, NULL, 0},
		{"rx-retry-delay", required_argument, NULL, 0},
		{"rx-retry-num", required_argument, NULL, 0},
		{"mergeable", required_argument, NULL, 0},
		{"stats", required_argument, NULL, 0},
		{"socket-file", required_argument, NULL, 0},
		{"tx-csum", required_argument, NULL, 0},
		{"tso", required_argument, NULL, 0},
		{"client", no_argument, &client_mode, 1},
		{"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
		{"builtin-net-driver", no_argument, &builtin_net_driver, 1},
		{NULL, 0, 0, 0},
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:P",
			long_option, &option_index)) != EOF) {
		switch (opt) {
		/* Portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
				us_vhost_usage(prgname);
				return -1;
			}
			break;

		case 'P':
			promiscuous = 1;
			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
				ETH_VMDQ_ACCEPT_BROADCAST |
				ETH_VMDQ_ACCEPT_MULTICAST;
			break;

		case 0:
			/* Enable/disable vm2vm comms. */
			if (!strncmp(long_option[option_index].name, "vm2vm",
				MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for "
						"vm2vm [0|1|2]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				vm2vm_mode = (vm2vm_type)ret;
			}
			/* Enable/disable retries on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					enable_retry = ret;
				}
			}
			/* Enable/disable TX checksum offload. */
			if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else
					enable_tx_csum = ret;
			}
			/* Enable/disable TSO offload. */
			if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else
					enable_tso = ret;
			}
			/* Specify the retry delay time (in microseconds) on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					burst_rx_delay_time = ret;
				}
			}
			/* Specify the number of retries on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					burst_rx_retry_num = ret;
				}
			}
			/* Enable/disable RX mergeable buffers. */
			if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					mergeable = !!ret;
					if (ret) {
						vmdq_conf_default.rxmode.jumbo_frame = 1;
						vmdq_conf_default.rxmode.max_rx_pkt_len
							= JUMBO_FRAME_MAX_SIZE;
					}
				}
			}
			/* Enable/disable stats. */
			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for stats [0..N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					enable_stats = ret;
				}
			}
			/* Set socket file path. */
			if (!strncmp(long_option[option_index].name,
						"socket-file", MAX_LONG_OPT_SZ)) {
				if (us_vhost_parse_socket_path(optarg) == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for socket name (Max %d characters)\n",
					PATH_MAX);
					us_vhost_usage(prgname);
					return -1;
				}
			}

			break;
		/* Invalid option - print options. */
		default:
			us_vhost_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = i;
	}

	if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	return 0;
}
/*
 * Update the global var NUM_PORTS and array PORTS according to system ports number
 * and return valid ports number
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (ports[portid] >= nb_ports) {
			RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
				ports[portid], (nb_ports - 1));
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}
static __rte_always_inline struct vhost_dev *
find_vhost_dev(struct ether_addr *mac)
{
	struct vhost_dev *vdev;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->ready == DEVICE_RX &&
		    is_same_ether_addr(mac, &vdev->mac_address))
			return vdev;
	}

	return NULL;
}
/*
 * This function learns the MAC address of the device and registers this along with a
 * vlan tag to a VMDQ.
 */
static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct ether_hdr *pkt_hdr;
	int i, ret;

	/* Learn MAC address of guest device from packet */
	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	if (find_vhost_dev(&pkt_hdr->s_addr)) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) device is using a registered MAC!\n",
			vdev->vid);
		return -1;
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];

	/* vlan_tag currently uses the device_id. */
	vdev->vlan_tag = vlan_tags[vdev->vid];

	/* Print out VMDQ registration info. */
	RTE_LOG(INFO, VHOST_DATA,
		"(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
		vdev->vid,
		vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
		vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
		vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
		vdev->vlan_tag);

	/* Register the MAC address. */
	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
				(uint32_t)vdev->vid + vmdq_pool_base);
	if (ret)
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) failed to add device MAC address to VMDQ\n",
			vdev->vid);

	rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);

	/* Set device as ready for RX. */
	vdev->ready = DEVICE_RX;

	return 0;
}
/*
 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
 * queue before disabling RX on the device.
 */
static inline void
unlink_vmdq(struct vhost_dev *vdev)
{
	unsigned i = 0;
	unsigned rx_count;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	if (vdev->ready == DEVICE_RX) {
		/* clear MAC and VLAN settings */
		rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
		for (i = 0; i < 6; i++)
			vdev->mac_address.addr_bytes[i] = 0;

		vdev->vlan_tag = 0;

		/* Clear out the receive buffers */
		rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

		while (rx_count) {
			for (i = 0; i < rx_count; i++)
				rte_pktmbuf_free(pkts_burst[i]);

			rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
		}

		vdev->ready = DEVICE_MAC_LEARNING;
	}
}
static __rte_always_inline void
virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
	    struct rte_mbuf *m)
{
	uint16_t ret;

	if (builtin_net_driver) {
		ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
	} else {
		ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
	}

	if (enable_stats) {
		rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
		rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
		src_vdev->stats.tx_total++;
		src_vdev->stats.tx += ret;
	}
}
/*
 * Check if the packet destination MAC address is for a local device. If so
 * then put the packet on that device's RX queue. If not then return.
 */
static __rte_always_inline int
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct ether_hdr *pkt_hdr;
	struct vhost_dev *dst_vdev;

	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
	if (!dst_vdev)
		return -1;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
			vdev->vid);
		return 0;
	}

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is local\n", dst_vdev->vid);

	if (unlikely(dst_vdev->remove)) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) device is marked for removal\n", dst_vdev->vid);
		return 0;
	}

	virtio_xmit(dst_vdev, vdev, m);
	return 0;
}
/*
 * Check if the destination MAC of a packet is one local VM,
 * and get its vlan tag, and offset if it is.
 */
static __rte_always_inline int
find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
	uint32_t *offset, uint16_t *vlan_tag)
{
	struct vhost_dev *dst_vdev;
	struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
	if (!dst_vdev)
		return 0;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
			vdev->vid);
		return -1;
	}

	/*
	 * HW vlan strip reduces the packet length by the length of the
	 * vlan tag, so we need to restore the packet length by adding
	 * the tag length back.
	 */
	*offset = VLAN_HLEN;
	*vlan_tag = vlan_tags[vdev->vid];

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
		vdev->vid, dst_vdev->vid, *vlan_tag);

	return 0;
}
static uint16_t
get_psd_sum(void *l3_hdr, uint64_t ol_flags)
{
	if (ol_flags & PKT_TX_IPV4)
		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}
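/*
 * Background note: when checksum/TSO work is offloaded to the NIC, the
 * hardware expects the TCP checksum field to be pre-set to the
 * pseudo-header checksum; the NIC then completes the sum. The
 * rte_ipv4/6_phdr_cksum() helpers take ol_flags because the expected
 * pseudo-header checksum differs when PKT_TX_TCP_SEG (TSO) is set:
 * the payload length is then left out of the sum.
 */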
static void virtio_tx_offload(struct rte_mbuf *m)
{
	void *l3_hdr;
	struct ipv4_hdr *ipv4_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	l3_hdr = (char *)eth_hdr + m->l2_len;

	if (m->ol_flags & PKT_TX_IPV4) {
		ipv4_hdr = l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		m->ol_flags |= PKT_TX_IP_CKSUM;
	}

	tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
	tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}
static inline void
free_pkts(struct rte_mbuf **pkts, uint16_t n)
{
	while (n--)
		rte_pktmbuf_free(pkts[n]);
}

static __rte_always_inline void
do_drain_mbuf_table(struct mbuf_table *tx_q)
{
	uint16_t count;

	count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
				 tx_q->m_table, tx_q->len);
	if (unlikely(count < tx_q->len))
		free_pkts(&tx_q->m_table[count], tx_q->len - count);

	tx_q->len = 0;
}
/*
 * This function routes the TX packet to the correct interface. This
 * may be a local device or the physical port.
 */
static __rte_always_inline void
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
	struct mbuf_table *tx_q;
	unsigned offset = 0;
	const uint16_t lcore_id = rte_lcore_id();
	struct ether_hdr *nh;

	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
		struct vhost_dev *vdev2;

		TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
			/* Do not echo the broadcast back to its source. */
			if (vdev2 != vdev)
				virtio_xmit(vdev2, vdev, m);
		}
		goto queue2nic;
	}

	/* check if destination is local VM */
	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
		rte_pktmbuf_free(m);
		return;
	}

	if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
		if (unlikely(find_local_dest(vdev, m, &offset,
					     &vlan_tag) != 0)) {
			rte_pktmbuf_free(m);
			return;
		}
	}

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is external\n", vdev->vid);

queue2nic:

	/* Add packet to the port tx queue */
	tx_q = &lcore_tx_queue[lcore_id];

	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
		/* Guest has inserted the vlan tag. */
		struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
		uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
		if ((vm2vm_mode == VM2VM_HARDWARE) &&
			(vh->vlan_tci != vlan_tag_be))
			vh->vlan_tci = vlan_tag_be;
	} else {
		m->ol_flags |= PKT_TX_VLAN_PKT;

		/*
		 * Find the right seg to adjust the data len when offset is
		 * bigger than tail room size.
		 */
		if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
			if (likely(offset <= rte_pktmbuf_tailroom(m)))
				m->data_len += offset;
			else {
				struct rte_mbuf *seg = m;

				while ((seg->next != NULL) &&
					(offset > rte_pktmbuf_tailroom(seg)))
					seg = seg->next;

				seg->data_len += offset;
			}
			m->pkt_len += offset;
		}

		m->vlan_tci = vlan_tag;
	}

	if (m->ol_flags & PKT_TX_TCP_SEG)
		virtio_tx_offload(m);

	tx_q->m_table[tx_q->len++] = m;
	if (enable_stats) {
		vdev->stats.tx_total++;
		vdev->stats.tx++;
	}

	if (unlikely(tx_q->len == MAX_PKT_BURST))
		do_drain_mbuf_table(tx_q);
}
static __rte_always_inline void
drain_mbuf_table(struct mbuf_table *tx_q)
{
	static uint64_t prev_tsc;
	uint64_t cur_tsc;

	if (tx_q->len == 0)
		return;

	cur_tsc = rte_rdtsc();
	if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
		prev_tsc = cur_tsc;

		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"TX queue drained after timeout with burst size %u\n",
			tx_q->len);
		do_drain_mbuf_table(tx_q);
	}
}
static __rte_always_inline void
drain_eth_rx(struct vhost_dev *vdev)
{
	uint16_t rx_count, enqueue_count;
	struct rte_mbuf *pkts[MAX_PKT_BURST];

	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
				    pkts, MAX_PKT_BURST);
	if (!rx_count)
		return;

	/*
	 * When "enable_retry" is set, we wait and retry when there are
	 * not enough free slots in the queue to hold @rx_count packets,
	 * to diminish packet loss.
	 */
	if (enable_retry &&
	    unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
			VIRTIO_RXQ))) {
		uint32_t retry;

		for (retry = 0; retry < burst_rx_retry_num; retry++) {
			rte_delay_us(burst_rx_delay_time);
			if (rx_count <= rte_vhost_avail_entries(vdev->vid,
					VIRTIO_RXQ))
				break;
		}
	}

	if (builtin_net_driver) {
		enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
						pkts, rx_count);
	} else {
		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
						pkts, rx_count);
	}
	if (enable_stats) {
		rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
		rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
	}

	free_pkts(pkts, rx_count);
}
static __rte_always_inline void
drain_virtio_tx(struct vhost_dev *vdev)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t count;
	uint16_t i;

	if (builtin_net_driver) {
		count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
					pkts, MAX_PKT_BURST);
	} else {
		count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
					mbuf_pool, pkts, MAX_PKT_BURST);
	}

	/* setup VMDq for the first packet */
	if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
		if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
			free_pkts(pkts, count);
	}

	for (i = 0; i < count; ++i)
		virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}
/*
 * Main function of vhost-switch. It basically does:
 *
 * for each vhost device {
 *    - drain_eth_rx(): drains the host eth Rx queue linked to the
 *      vhost device, and delivers the packets to the guest virtio Rx
 *      ring associated with this vhost device.
 *
 *    - drain_virtio_tx(): drains the guest virtio Tx queue and
 *      delivers the packets to the target, which could be another
 *      vhost device, or the physical eth dev. The routing is done in
 *      function "virtio_tx_route".
 * }
 */
static int
switch_worker(void *arg __rte_unused)
{
	unsigned i;
	unsigned lcore_id = rte_lcore_id();
	struct vhost_dev *vdev;
	struct mbuf_table *tx_q;

	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);

	tx_q = &lcore_tx_queue[lcore_id];
	for (i = 0; i < rte_lcore_count(); i++) {
		if (lcore_ids[i] == lcore_id) {
			tx_q->txq_id = i;
			break;
		}
	}

	while (1) {
		drain_mbuf_table(tx_q);

		/*
		 * Inform the configuration core that we have exited the
		 * linked list and that no devices are in use if requested.
		 */
		if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
			lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;

		/*
		 * Process vhost devices
		 */
		TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
			      lcore_vdev_entry) {
			if (unlikely(vdev->remove)) {
				unlink_vmdq(vdev);
				vdev->ready = DEVICE_SAFE_REMOVE;
				continue;
			}

			if (likely(vdev->ready == DEVICE_RX))
				drain_eth_rx(vdev);

			if (likely(!vdev->remove))
				drain_virtio_tx(vdev);
		}
	}

	return 0;
}
/*
 * Remove a device from the specific data core linked list and from the
 * main linked list. Synchronization occurs through the use of the
 * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
 * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
 */
static void
destroy_device(int vid)
{
	struct vhost_dev *vdev = NULL;
	int lcore;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->vid == vid)
			break;
	}
	if (!vdev)
		return;
	/* set the remove flag. */
	vdev->remove = 1;
	while (vdev->ready != DEVICE_SAFE_REMOVE)
		rte_pause();

	if (builtin_net_driver)
		vs_vhost_net_remove(vdev);

	TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
		     lcore_vdev_entry);
	TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);

	/* Set the dev_removal_flag on each lcore. */
	RTE_LCORE_FOREACH_SLAVE(lcore)
		lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;

	/*
	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
	 * we can be sure that they can no longer access the device removed
	 * from the linked lists and that the devices are no longer in use.
	 */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
			rte_pause();
	}

	lcore_info[vdev->coreid].device_num--;

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been removed from data core\n",
		vdev->vid);

	rte_free(vdev);
}
/*
 * A new device is added to a data core. First the device is added to the main linked list
 * and then allocated to a specific data core.
 */
static int
new_device(int vid)
{
	int lcore, core_add = 0;
	uint32_t device_num_min = num_devices;
	struct vhost_dev *vdev;

	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
	if (vdev == NULL) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%d) couldn't allocate memory for vhost dev\n",
			vid);
		return -1;
	}
	vdev->vid = vid;

	if (builtin_net_driver)
		vs_vhost_net_setup(vdev);

	TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
	vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;

	/* reset ready flag */
	vdev->ready = DEVICE_MAC_LEARNING;
	vdev->remove = 0;

	/* Find a suitable lcore to add the device. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		if (lcore_info[lcore].device_num < device_num_min) {
			device_num_min = lcore_info[lcore].device_num;
			core_add = lcore;
		}
	}
	vdev->coreid = core_add;

	TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
			  lcore_vdev_entry);
	lcore_info[vdev->coreid].device_num++;

	/* Disable notifications. */
	rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
	rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been added to data core %d\n",
		vid, vdev->coreid);

	return 0;
}
/*
 * These callbacks allow devices to be added to the data core when configuration
 * has been fully complete.
 */
static const struct vhost_device_ops virtio_net_device_ops =
{
	.new_device =  new_device,
	.destroy_device = destroy_device,
};
/*
 * This is a thread that wakes up periodically to print statistics if the
 * user has enabled them.
 */
static void
print_stats(void)
{
	struct vhost_dev *vdev;
	uint64_t tx_dropped, rx_dropped;
	uint64_t tx, tx_total, rx, rx_total;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	while (1) {
		sleep(enable_stats);

		/* Clear screen and move to top left */
		printf("%s%s\n", clr, top_left);
		printf("Device statistics =================================\n");

		TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
			tx_total   = vdev->stats.tx_total;
			tx         = vdev->stats.tx;
			tx_dropped = tx_total - tx;

			rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
			rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
			rx_dropped = rx_total - rx;

			printf("Statistics for device %d\n"
				"-----------------------\n"
				"TX total:              %" PRIu64 "\n"
				"TX dropped:            %" PRIu64 "\n"
				"TX successful:         %" PRIu64 "\n"
				"RX total:              %" PRIu64 "\n"
				"RX dropped:            %" PRIu64 "\n"
				"RX successful:         %" PRIu64 "\n",
				vdev->vid,
				tx_total, tx_dropped, tx,
				rx_total, rx_dropped, rx);
		}

		printf("===================================================\n");
	}
}
static void
unregister_drivers(int socket_num)
{
	int i, ret;

	for (i = 0; i < socket_num; i++) {
		ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
		if (ret != 0)
			RTE_LOG(ERR, VHOST_CONFIG,
				"Fail to unregister vhost driver for %s.\n",
				socket_files + i * PATH_MAX);
	}
}

/* When we receive an INT signal, unregister the vhost driver */
static void
sigint_handler(__rte_unused int signum)
{
	/* Unregister vhost driver. */
	unregister_drivers(nb_sockets);

	exit(0);
}
/*
 * While creating an mbuf pool, one key thing is to figure out how
 * many mbuf entries are enough for our use. FYI, here are some
 * guidelines:
 *
 * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
 *
 * - For each switch core (a CPU core that does the packet switching),
 *   we need to also make some reservation for receiving the packets
 *   from the virtio Tx queue. How many is enough depends on the usage.
 *   It's normally a simple calculation like the following:
 *
 *       MAX_PKT_BURST * max packet size / mbuf size
 *
 *   So, we definitely need to allocate more mbufs when TSO is enabled.
 *
 * - Similarly, for each switching core, we should serve @nr_rx_desc
 *   mbufs for receiving the packets from the physical NIC device.
 *
 * - We also need to make sure, for each switch core, we have allocated
 *   enough mbufs to fill up the mbuf cache.
 */
static void
create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
	uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
{
	uint32_t nr_mbufs;
	uint32_t nr_mbufs_per_core;
	uint32_t mtu = 1500;

	if (enable_tso)
		mtu = 64 * 1024;

	nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
			(mbuf_size - RTE_PKTMBUF_HEADROOM);
	nr_mbufs_per_core += nr_rx_desc;
	nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);

	nr_mbufs  = nr_queues * nr_rx_desc;
	nr_mbufs += nr_mbufs_per_core * nr_switch_core;
	nr_mbufs *= nr_port;

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
					    nr_mbuf_cache, 0, mbuf_size,
					    rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
}
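/*
 * Worked example for the sizing above (illustrative numbers, assuming
 * MAX_PKT_BURST is 32): with mbuf_size = RTE_MBUF_DEFAULT_BUF_SIZE =
 * 2176, RTE_PKTMBUF_HEADROOM = 128, mtu = 1500, nr_rx_desc = 1024:
 *
 *   nr_mbufs_per_core  = (1500 + 2176) * 32 / (2176 - 128) =   57
 *   nr_mbufs_per_core += 1024 (rx descriptors)             = 1081
 *
 * and with one port, nr_queues = MAX_QUEUES (128) and three switch
 * cores:
 *
 *   nr_mbufs = 128 * 1024 + 1081 * 3 = 134315
 */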
/*
 * Main function, does initialisation and calls the per-lcore functions.
 */
int
main(int argc, char *argv[])
{
	unsigned lcore_id, core_id = 0;
	unsigned nb_ports, valid_num_ports;
	int ret, i;
	uint16_t portid;
	static pthread_t tid;
	char thread_name[RTE_MAX_THREAD_NAME_LEN];
	uint64_t flags = 0;

	signal(SIGINT, sigint_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = us_vhost_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid argument\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		TAILQ_INIT(&lcore_info[lcore_id].vdev_list);

		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;
	}

	if (rte_lcore_count() > RTE_MAX_LCORE)
		rte_exit(EXIT_FAILURE, "Not enough cores\n");

	/* Get the number of physical ports. */
	nb_ports = rte_eth_dev_count();

	/*
	 * Update the global var NUM_PORTS and global array PORTS
	 * and get value of var VALID_NUM_PORTS according to system ports number
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	/*
	 * FIXME: here we are trying to allocate mbufs big enough for
	 * @MAX_QUEUES, but the truth is we're never going to use that
	 * many queues here. We probably should only do allocation for
	 * those queues we are going to use.
	 */
	create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
			 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);

	if (vm2vm_mode == VM2VM_HARDWARE) {
		/* Enable VT loopback to let the L2 switch do it. */
		vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
		RTE_LOG(DEBUG, VHOST_CONFIG,
			"Enable loop back for L2 switch in vmdq.\n");
	}
	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			RTE_LOG(INFO, VHOST_PORT,
				"Skipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid) != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot initialize network ports\n");
	}
	/* Enable stats if the user option is set. */
	if (enable_stats) {
		ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot create print-stats thread\n");

		/* Set thread_name for aid in debugging. */
		snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
		ret = rte_thread_setname(tid, thread_name);
		if (ret != 0)
			RTE_LOG(DEBUG, VHOST_CONFIG,
				"Cannot set print-stats name\n");
	}
	/* Launch all data cores. */
	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_remote_launch(switch_worker, NULL, lcore_id);

	if (client_mode)
		flags |= RTE_VHOST_USER_CLIENT;

	if (dequeue_zero_copy)
		flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
	/* Register vhost user driver to handle vhost messages. */
	for (i = 0; i < nb_sockets; i++) {
		char *file = socket_files + i * PATH_MAX;
		ret = rte_vhost_driver_register(file, flags);
		if (ret != 0) {
			unregister_drivers(i);
			rte_exit(EXIT_FAILURE,
				"vhost driver register failure.\n");
		}

		if (builtin_net_driver)
			rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);

		if (mergeable == 0) {
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_MRG_RXBUF);
		}

		if (enable_tx_csum == 0) {
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_CSUM);
		}

		if (enable_tso == 0) {
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_HOST_TSO4);
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_HOST_TSO6);
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_GUEST_TSO4);
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_GUEST_TSO6);
		}

		if (promiscuous) {
			rte_vhost_driver_enable_features(file,
				1ULL << VIRTIO_NET_F_CTRL_RX);
		}

		ret = rte_vhost_driver_callback_register(file,
			&virtio_net_device_ops);
		if (ret != 0) {
			rte_exit(EXIT_FAILURE,
				"failed to register vhost driver callbacks.\n");
		}

		if (rte_vhost_driver_start(file) < 0) {
			rte_exit(EXIT_FAILURE,
				"failed to start vhost driver.\n");
		}
	}
	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_wait_lcore(lcore_id);

	return 0;
}