1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
7 #include <linux/if_ether.h>
8 #include <linux/if_vlan.h>
9 #include <linux/virtio_net.h>
10 #include <linux/virtio_ring.h>
13 #include <sys/eventfd.h>
14 #include <sys/param.h>
17 #include <rte_atomic.h>
18 #include <rte_cycles.h>
19 #include <rte_ethdev.h>
21 #include <rte_string_fns.h>
22 #include <rte_malloc.h>
23 #include <rte_vhost.h>
26 #include <rte_pause.h>
31 #define MAX_QUEUES 128
34 /* The maximum number of external ports supported. */
35 #define MAX_SUP_PORTS 1
37 #define MBUF_CACHE_SIZE 128
38 #define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
40 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
42 #define BURST_RX_WAIT_US 15 /* Defines how long we wait between retries on RX */
43 #define BURST_RX_RETRIES 4 /* Number of retries on RX. */
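/* Maximum jumbo frame size (0x2600 = 9728 bytes). */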
45 #define JUMBO_FRAME_MAX_SIZE 0x2600
47 /* State of virtio device. */
48 #define DEVICE_MAC_LEARNING 0
50 #define DEVICE_SAFE_REMOVE 2
52 /* Configurable number of RX/TX ring descriptors */
53 #define RTE_TEST_RX_DESC_DEFAULT 1024
54 #define RTE_TEST_TX_DESC_DEFAULT 512
56 #define INVALID_PORT_ID 0xFF
58 /* Max number of devices. Limited by VMDQ. */
59 #define MAX_DEVICES 64
61 /* Size of buffers used for snprintfs. */
62 #define MAX_PRINT_BUFF 6072
64 /* Maximum long option length for option parsing. */
65 #define MAX_LONG_OPT_SZ 64
67 /* mask of enabled ports */
68 static uint32_t enabled_port_mask = 0;
70 /* Promiscuous mode */
71 static uint32_t promiscuous;
73 /* Number of devices/queues to support. */
74 static uint32_t num_queues = 0;
75 static uint32_t num_devices;
77 static struct rte_mempool *mbuf_pool;
80 /* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
87 static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
90 static uint32_t enable_stats = 0;
91 /* Enable retries on RX. */
92 static uint32_t enable_retry = 1;
94 /* TX checksum offload; disabled by default. */
95 static uint32_t enable_tx_csum;
97 /* TSO offload; disabled by default. */
98 static uint32_t enable_tso;
100 static int client_mode;
101 static int dequeue_zero_copy;
103 static int builtin_net_driver;
105 /* Specify timeout (in microseconds) between retries on RX. */
106 static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
107 /* Specify the number of retries on RX. */
108 static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
110 /* Socket file paths. Can be set by the user. */
111 static char *socket_files;
112 static int nb_sockets;
114 /* Empty VMDQ configuration structure. Filled in programmatically. */
115 static struct rte_eth_conf vmdq_conf_default = {
117 .mq_mode = ETH_MQ_RX_VMDQ_ONLY,
119 .ignore_offload_bitfield = 1,
121 * VLAN strip is necessary for 1G NICs such as the I350;
122 * it fixes a bug where IPv4 forwarding in the guest could not
123 * forward packets from one virtio device to another.
125 .offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
126 DEV_RX_OFFLOAD_VLAN_STRIP),
130 .mq_mode = ETH_MQ_TX_NONE,
131 .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
132 DEV_TX_OFFLOAD_TCP_CKSUM |
133 DEV_TX_OFFLOAD_VLAN_INSERT |
134 DEV_TX_OFFLOAD_MULTI_SEGS |
135 DEV_TX_OFFLOAD_TCP_TSO),
139 * should be overridden separately in code with
143 .nb_queue_pools = ETH_8_POOLS,
144 .enable_default_pool = 0,
147 .pool_map = {{0, 0},},
153 static unsigned lcore_ids[RTE_MAX_LCORE];
154 static uint16_t ports[RTE_MAX_ETHPORTS];
155 static unsigned num_ports = 0; /**< The number of ports specified on the command line */
156 static uint16_t num_pf_queues, num_vmdq_queues;
157 static uint16_t vmdq_pool_base, vmdq_queue_base;
158 static uint16_t queues_per_pool;
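/*
 * VLAN tags pre-assigned to VMDQ pools: entry i is mapped to pool i
 * and is used as the VLAN tag of the vhost device with vid i.
 */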
160 const uint16_t vlan_tags[] = {
161 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
162 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
163 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
164 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
165 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
166 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
167 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
168 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
171 /* ethernet addresses of ports */
172 static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
174 static struct vhost_dev_tailq_list vhost_dev_list =
175 TAILQ_HEAD_INITIALIZER(vhost_dev_list);
177 static struct lcore_info lcore_info[RTE_MAX_LCORE];
179 /* Used for queueing bursts of TX packets. */
183 struct rte_mbuf *m_table[MAX_PKT_BURST];
186 /* TX queue for each data core. */
187 struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
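/*
 * TX drain period in TSC cycles: cycles per microsecond (rounded up)
 * multiplied by the drain interval in microseconds.
 */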
189 #define MBUF_TABLE_DRAIN_TSC ((rte_get_tsc_hz() + US_PER_S - 1) \
190 / US_PER_S * BURST_TX_DRAIN_US)
194 * Builds up the correct configuration for VMDQ VLAN pool map
195 * according to the pool & queue limits.
198 get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
200 struct rte_eth_vmdq_rx_conf conf;
201 struct rte_eth_vmdq_rx_conf *def_conf =
202 &vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
205 memset(&conf, 0, sizeof(conf));
206 conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
207 conf.nb_pool_maps = num_devices;
208 conf.enable_loop_back = def_conf->enable_loop_back;
209 conf.rx_mode = def_conf->rx_mode;
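/*
 * Map one VLAN tag to each pool so that VMDQ can steer incoming
 * packets into per-device pools by VLAN ID.
 */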
211 for (i = 0; i < conf.nb_pool_maps; i++) {
212 conf.pool_map[i].vlan_id = vlan_tags[i];
213 conf.pool_map[i].pools = (1UL << i);
216 (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
217 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
218 sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
223 * Validate the device number against the max pool number obtained from
224 * dev_info. If the device number is invalid, print an error message and
225 * return -1. Each device must have its own pool.
228 validate_num_devices(uint32_t max_nb_devices)
230 if (num_devices > max_nb_devices) {
231 RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
238 * Initialises a given port using global settings, with the RX buffers
239 * coming from the mbuf_pool passed as a parameter.
242 port_init(uint16_t port)
244 struct rte_eth_dev_info dev_info;
245 struct rte_eth_conf port_conf;
246 struct rte_eth_rxconf *rxconf;
247 struct rte_eth_txconf *txconf;
248 int16_t rx_rings, tx_rings;
249 uint16_t rx_ring_size, tx_ring_size;
253 /* The max pool number from dev_info is used to validate the pool number specified on the command line. */
254 rte_eth_dev_info_get(port, &dev_info);
256 rxconf = &dev_info.default_rxconf;
257 txconf = &dev_info.default_txconf;
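/*
 * Drop incoming packets when no RX descriptors are available, and use
 * the per-queue offloads API rather than the legacy txq_flags field.
 */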
258 rxconf->rx_drop_en = 1;
259 txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
261 /* Configure the number of supported virtio devices based on VMDQ limits. */
262 num_devices = dev_info.max_vmdq_pools;
264 rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
265 tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
268 * When dequeue zero copy is enabled, the guest Tx used vring will be
269 * updated only when the corresponding mbuf is freed. Thus, the nb_tx_desc
270 * (tx_ring_size here) must be small enough that the driver will
271 * hit the free threshold easily and free mbufs in a timely manner.
272 * Otherwise, the guest Tx vring would be starved.
274 if (dequeue_zero_copy)
277 tx_rings = (uint16_t)rte_lcore_count();
279 retval = validate_num_devices(MAX_DEVICES);
283 /* Get port configuration. */
284 retval = get_eth_conf(&port_conf, num_devices);
287 /* NIC queues are divided into PF queues and VMDQ queues. */
288 num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
289 queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
290 num_vmdq_queues = num_devices * queues_per_pool;
291 num_queues = num_pf_queues + num_vmdq_queues;
292 vmdq_queue_base = dev_info.vmdq_queue_base;
293 vmdq_pool_base = dev_info.vmdq_pool_base;
294 printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
295 num_pf_queues, num_devices, queues_per_pool);
297 if (!rte_eth_dev_is_valid_port(port))
300 rx_rings = (uint16_t)dev_info.max_rx_queues;
301 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
302 port_conf.txmode.offloads |=
303 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
304 /* Configure ethernet device. */
305 retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
307 RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
308 port, strerror(-retval));
312 retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
315 RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
316 "for port %u: %s.\n", port, strerror(-retval));
319 if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
320 RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
321 "for Rx queues on port %u.\n", port);
325 /* Set up the queues. */
326 rxconf->offloads = port_conf.rxmode.offloads;
327 for (q = 0; q < rx_rings; q++) {
328 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
329 rte_eth_dev_socket_id(port),
333 RTE_LOG(ERR, VHOST_PORT,
334 "Failed to setup rx queue %u of port %u: %s.\n",
335 q, port, strerror(-retval));
339 txconf->offloads = port_conf.txmode.offloads;
340 for (q = 0; q < tx_rings; q++) {
341 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
342 rte_eth_dev_socket_id(port),
345 RTE_LOG(ERR, VHOST_PORT,
346 "Failed to setup tx queue %u of port %u: %s.\n",
347 q, port, strerror(-retval));
352 /* Start the device. */
353 retval = rte_eth_dev_start(port);
355 RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
356 port, strerror(-retval));
361 rte_eth_promiscuous_enable(port);
363 rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
364 RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
365 RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
366 " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
368 vmdq_ports_eth_addr[port].addr_bytes[0],
369 vmdq_ports_eth_addr[port].addr_bytes[1],
370 vmdq_ports_eth_addr[port].addr_bytes[2],
371 vmdq_ports_eth_addr[port].addr_bytes[3],
372 vmdq_ports_eth_addr[port].addr_bytes[4],
373 vmdq_ports_eth_addr[port].addr_bytes[5]);
379 * Set socket file path.
382 us_vhost_parse_socket_path(const char *q_arg)
384 /* Check that the socket path fits within PATH_MAX. */
385 if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
388 socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
389 snprintf(socket_files + nb_sockets * PATH_MAX, PATH_MAX, "%s", q_arg);
396 * Parse the portmask provided at run time.
399 parse_portmask(const char *portmask)
406 /* parse hexadecimal string */
407 pm = strtoul(portmask, &end, 16);
408 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
419 * Parse numeric options at run time.
422 parse_num_opt(const char *q_arg, uint32_t max_valid_value)
429 /* parse unsigned int string */
430 num = strtoul(q_arg, &end, 10);
431 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
434 if (num > max_valid_value)
445 us_vhost_usage(const char *prgname)
447 RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
449 " --rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
450 " --socket-file <path>\n"
452 " -p PORTMASK: Set mask for ports to be used by application\n"
453 " --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
454 " --rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destintation queue is full\n"
455 " --rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\n"
456 " --rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\n"
457 " --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
458 " --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
459 " --socket-file: The path of the socket file.\n"
460 " --tx-csum [0|1] disable/enable TX checksum offload.\n"
461 " --tso [0|1] disable/enable TCP segment offload.\n"
462 " --client register a vhost-user socket as client mode.\n"
463 " --dequeue-zero-copy enables dequeue zero copy\n",
468 * Parse the arguments given in the command line of the application.
471 us_vhost_parse_args(int argc, char **argv)
476 const char *prgname = argv[0];
477 static struct option long_option[] = {
478 {"vm2vm", required_argument, NULL, 0},
479 {"rx-retry", required_argument, NULL, 0},
480 {"rx-retry-delay", required_argument, NULL, 0},
481 {"rx-retry-num", required_argument, NULL, 0},
482 {"mergeable", required_argument, NULL, 0},
483 {"stats", required_argument, NULL, 0},
484 {"socket-file", required_argument, NULL, 0},
485 {"tx-csum", required_argument, NULL, 0},
486 {"tso", required_argument, NULL, 0},
487 {"client", no_argument, &client_mode, 1},
488 {"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
489 {"builtin-net-driver", no_argument, &builtin_net_driver, 1},
493 /* Parse command line */
494 while ((opt = getopt_long(argc, argv, "p:P",
495 long_option, &option_index)) != EOF) {
499 enabled_port_mask = parse_portmask(optarg);
500 if (enabled_port_mask == 0) {
501 RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
502 us_vhost_usage(prgname);
509 vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
510 ETH_VMDQ_ACCEPT_BROADCAST |
511 ETH_VMDQ_ACCEPT_MULTICAST;
516 /* Enable/disable vm2vm comms. */
517 if (!strncmp(long_option[option_index].name, "vm2vm",
519 ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
521 RTE_LOG(INFO, VHOST_CONFIG,
522 "Invalid argument for "
524 us_vhost_usage(prgname);
527 vm2vm_mode = (vm2vm_type)ret;
531 /* Enable/disable retries on RX. */
532 if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
533 ret = parse_num_opt(optarg, 1);
535 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
536 us_vhost_usage(prgname);
543 /* Enable/disable TX checksum offload. */
544 if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
545 ret = parse_num_opt(optarg, 1);
547 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
548 us_vhost_usage(prgname);
551 enable_tx_csum = ret;
554 /* Enable/disable TSO offload. */
555 if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
556 ret = parse_num_opt(optarg, 1);
558 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
559 us_vhost_usage(prgname);
565 /* Specify the delay time (in microseconds) between retries on RX. */
566 if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
567 ret = parse_num_opt(optarg, INT32_MAX);
569 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
570 us_vhost_usage(prgname);
573 burst_rx_delay_time = ret;
577 /* Specify the number of retries on RX. */
578 if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
579 ret = parse_num_opt(optarg, INT32_MAX);
581 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
582 us_vhost_usage(prgname);
585 burst_rx_retry_num = ret;
589 /* Enable/disable RX mergeable buffers. */
590 if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
591 ret = parse_num_opt(optarg, 1);
593 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
594 us_vhost_usage(prgname);
599 vmdq_conf_default.rxmode.offloads |=
600 DEV_RX_OFFLOAD_JUMBO_FRAME;
601 vmdq_conf_default.rxmode.max_rx_pkt_len
602 = JUMBO_FRAME_MAX_SIZE;
607 /* Enable/disable stats. */
608 if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
609 ret = parse_num_opt(optarg, INT32_MAX);
611 RTE_LOG(INFO, VHOST_CONFIG,
612 "Invalid argument for stats [0..N]\n");
613 us_vhost_usage(prgname);
620 /* Set socket file path. */
621 if (!strncmp(long_option[option_index].name,
622 "socket-file", MAX_LONG_OPT_SZ)) {
623 if (us_vhost_parse_socket_path(optarg) == -1) {
624 RTE_LOG(INFO, VHOST_CONFIG,
625 "Invalid argument for socket name (Max %d characters)\n",
627 us_vhost_usage(prgname);
634 /* Invalid option - print options. */
636 us_vhost_usage(prgname);
641 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
642 if (enabled_port_mask & (1 << i))
643 ports[num_ports++] = i;
646 if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
647 RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
648 "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
656 * Update the global variable num_ports and the ports array according to the
657 * number of system ports, and return the number of valid ports.
659 static unsigned check_ports_num(unsigned nb_ports)
661 unsigned valid_num_ports = num_ports;
664 if (num_ports > nb_ports) {
665 RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
666 num_ports, nb_ports);
667 num_ports = nb_ports;
670 for (portid = 0; portid < num_ports; portid++) {
671 if (!rte_eth_dev_is_valid_port(ports[portid])) {
672 RTE_LOG(INFO, VHOST_PORT,
673 "\nSpecified port ID(%u) is not valid\n",
675 ports[portid] = INVALID_PORT_ID;
679 return valid_num_ports;
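/*
 * Look up a vhost device by MAC address. Only devices that have completed
 * MAC learning (ready == DEVICE_RX) are matched.
 */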
682 static __rte_always_inline struct vhost_dev *
683 find_vhost_dev(struct ether_addr *mac)
685 struct vhost_dev *vdev;
687 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
688 if (vdev->ready == DEVICE_RX &&
689 is_same_ether_addr(mac, &vdev->mac_address))
697 * This function learns the MAC address of the device and registers it,
698 * along with a VLAN tag, with a VMDQ pool.
701 link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
703 struct ether_hdr *pkt_hdr;
706 /* Learn MAC address of guest device from packet */
707 pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
709 if (find_vhost_dev(&pkt_hdr->s_addr)) {
710 RTE_LOG(ERR, VHOST_DATA,
711 "(%d) device is using a registered MAC!\n",
716 for (i = 0; i < ETHER_ADDR_LEN; i++)
717 vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];
719 /* vlan_tag currently uses the device_id. */
720 vdev->vlan_tag = vlan_tags[vdev->vid];
722 /* Print out VMDQ registration info. */
723 RTE_LOG(INFO, VHOST_DATA,
724 "(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
726 vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
727 vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
728 vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
731 /* Register the MAC address. */
732 ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
733 (uint32_t)vdev->vid + vmdq_pool_base);
735 RTE_LOG(ERR, VHOST_DATA,
736 "(%d) failed to add device MAC address to VMDQ\n",
739 rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);
741 /* Set device as ready for RX. */
742 vdev->ready = DEVICE_RX;
748 * Removes the MAC address and VLAN tag from the VMDQ. Ensures that nothing is adding
749 * buffers to the RX queue before disabling RX on the device.
752 unlink_vmdq(struct vhost_dev *vdev)
756 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
758 if (vdev->ready == DEVICE_RX) {
759 /* Clear MAC and VLAN settings. */
760 rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
761 for (i = 0; i < ETHER_ADDR_LEN; i++)
762 vdev->mac_address.addr_bytes[i] = 0;
766 /* Clear out the receive buffers. */
767 rx_count = rte_eth_rx_burst(ports[0],
768 (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
771 for (i = 0; i < rx_count; i++)
772 rte_pktmbuf_free(pkts_burst[i]);
774 rx_count = rte_eth_rx_burst(ports[0],
775 (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
778 vdev->ready = DEVICE_MAC_LEARNING;
782 static __rte_always_inline void
783 virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
788 if (builtin_net_driver) {
789 ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
791 ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
795 rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
796 rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
797 src_vdev->stats.tx_total++;
798 src_vdev->stats.tx += ret;
803 * Check if the packet destination MAC address is for a local device. If so, put
804 * the packet on that device's RX queue. If not, return.
806 static __rte_always_inline int
807 virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
809 struct ether_hdr *pkt_hdr;
810 struct vhost_dev *dst_vdev;
812 pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
814 dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
818 if (vdev->vid == dst_vdev->vid) {
819 RTE_LOG_DP(DEBUG, VHOST_DATA,
820 "(%d) TX: src and dst MAC is same. Dropping packet.\n",
825 RTE_LOG_DP(DEBUG, VHOST_DATA,
826 "(%d) TX: MAC address is local\n", dst_vdev->vid);
828 if (unlikely(dst_vdev->remove)) {
829 RTE_LOG_DP(DEBUG, VHOST_DATA,
830 "(%d) device is marked for removal\n", dst_vdev->vid);
834 virtio_xmit(dst_vdev, vdev, m);
839 * Check if the destination MAC of a packet belongs to a local VM;
840 * if it does, get its VLAN tag and the length offset.
842 static __rte_always_inline int
843 find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
844 uint32_t *offset, uint16_t *vlan_tag)
846 struct vhost_dev *dst_vdev;
847 struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
849 dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
853 if (vdev->vid == dst_vdev->vid) {
854 RTE_LOG_DP(DEBUG, VHOST_DATA,
855 "(%d) TX: src and dst MAC is same. Dropping packet.\n",
861 * HW VLAN strip reduces the packet length by the length of
862 * the VLAN tag, so the packet length needs to be restored
863 * by adding it back.
866 *vlan_tag = vlan_tags[vdev->vid];
868 RTE_LOG_DP(DEBUG, VHOST_DATA,
869 "(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
870 vdev->vid, dst_vdev->vid, *vlan_tag);
876 get_psd_sum(void *l3_hdr, uint64_t ol_flags)
878 if (ol_flags & PKT_TX_IPV4)
879 return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
880 else /* assume ethertype == ETHER_TYPE_IPv6 */
881 return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
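/*
 * Prepare a TSO packet for the NIC: clear the IP header checksum and seed
 * the TCP checksum with the pseudo-header checksum, as hardware expects
 * before segmentation.
 */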
884 static void virtio_tx_offload(struct rte_mbuf *m)
887 struct ipv4_hdr *ipv4_hdr = NULL;
888 struct tcp_hdr *tcp_hdr = NULL;
889 struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
891 l3_hdr = (char *)eth_hdr + m->l2_len;
893 if (m->ol_flags & PKT_TX_IPV4) {
895 ipv4_hdr->hdr_checksum = 0;
896 m->ol_flags |= PKT_TX_IP_CKSUM;
899 tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
900 tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
904 free_pkts(struct rte_mbuf **pkts, uint16_t n)
907 rte_pktmbuf_free(pkts[n]);
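/*
 * Transmit the buffered packets on the physical port and free any
 * packets the NIC did not accept.
 */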
910 static __rte_always_inline void
911 do_drain_mbuf_table(struct mbuf_table *tx_q)
915 count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
916 tx_q->m_table, tx_q->len);
917 if (unlikely(count < tx_q->len))
918 free_pkts(&tx_q->m_table[count], tx_q->len - count);
924 * This function routes the TX packet to the correct interface. This
925 * may be a local device or the physical port.
927 static __rte_always_inline void
928 virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
930 struct mbuf_table *tx_q;
932 const uint16_t lcore_id = rte_lcore_id();
933 struct ether_hdr *nh;
936 nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
937 if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
938 struct vhost_dev *vdev2;
940 TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
942 virtio_xmit(vdev2, vdev, m);
947 /* Check if the destination is a local VM. */
948 if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
953 if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
954 if (unlikely(find_local_dest(vdev, m, &offset,
961 RTE_LOG_DP(DEBUG, VHOST_DATA,
962 "(%d) TX: MAC address is external\n", vdev->vid);
966 /* Add the packet to the port TX queue. */
967 tx_q = &lcore_tx_queue[lcore_id];
969 nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
970 if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
971 /* Guest has inserted the vlan tag. */
972 struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
973 uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
974 if ((vm2vm_mode == VM2VM_HARDWARE) &&
975 (vh->vlan_tci != vlan_tag_be))
976 vh->vlan_tci = vlan_tag_be;
978 m->ol_flags |= PKT_TX_VLAN_PKT;
981 * Find the right segment to adjust the data length when the offset
982 * is bigger than the tailroom size.
984 if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
985 if (likely(offset <= rte_pktmbuf_tailroom(m)))
986 m->data_len += offset;
988 struct rte_mbuf *seg = m;
990 while ((seg->next != NULL) &&
991 (offset > rte_pktmbuf_tailroom(seg)))
994 seg->data_len += offset;
996 m->pkt_len += offset;
999 m->vlan_tci = vlan_tag;
1002 if (m->ol_flags & PKT_TX_TCP_SEG)
1003 virtio_tx_offload(m);
1005 tx_q->m_table[tx_q->len++] = m;
1007 vdev->stats.tx_total++;
1011 if (unlikely(tx_q->len == MAX_PKT_BURST))
1012 do_drain_mbuf_table(tx_q);
1016 static __rte_always_inline void
1017 drain_mbuf_table(struct mbuf_table *tx_q)
1019 static uint64_t prev_tsc;
1025 cur_tsc = rte_rdtsc();
1026 if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
1029 RTE_LOG_DP(DEBUG, VHOST_DATA,
1030 "TX queue drained after timeout with burst size %u\n",
1032 do_drain_mbuf_table(tx_q);
1036 static __rte_always_inline void
1037 drain_eth_rx(struct vhost_dev *vdev)
1039 uint16_t rx_count, enqueue_count;
1040 struct rte_mbuf *pkts[MAX_PKT_BURST];
1042 rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
1043 pkts, MAX_PKT_BURST);
1048 * When "enable_retry" is set, here we wait and retry when there
1049 * is no enough free slots in the queue to hold @rx_count packets,
1050 * to diminish packet loss.
1053 unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
1057 for (retry = 0; retry < burst_rx_retry_num; retry++) {
1058 rte_delay_us(burst_rx_delay_time);
1059 if (rx_count <= rte_vhost_avail_entries(vdev->vid,
1065 if (builtin_net_driver) {
1066 enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
1069 enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
1073 rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
1074 rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
1077 free_pkts(pkts, rx_count);
1080 static __rte_always_inline void
1081 drain_virtio_tx(struct vhost_dev *vdev)
1083 struct rte_mbuf *pkts[MAX_PKT_BURST];
1087 if (builtin_net_driver) {
1088 count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
1089 pkts, MAX_PKT_BURST);
1091 count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
1092 mbuf_pool, pkts, MAX_PKT_BURST);
1095 /* setup VMDq for the first packet */
1096 if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
1097 if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
1098 free_pkts(pkts, count);
1101 for (i = 0; i < count; ++i)
1102 virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
1106 * Main function of vhost-switch. It basically does:
1108 * for each vhost device {
1111 * Which drains the host eth Rx queue linked to the vhost device
1112 * and delivers all packets to the guest virtio Rx ring associated
1113 * with this vhost device.
1115 * - drain_virtio_tx()
1117 * Which drains the guest virtio Tx queue and delivers all packets
1118 * to the target, which could be another vhost device or the
1119 * physical eth dev. The routing is done in function "virtio_tx_route".
1123 switch_worker(void *arg __rte_unused)
1126 unsigned lcore_id = rte_lcore_id();
1127 struct vhost_dev *vdev;
1128 struct mbuf_table *tx_q;
1130 RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
1132 tx_q = &lcore_tx_queue[lcore_id];
1133 for (i = 0; i < rte_lcore_count(); i++) {
1134 if (lcore_ids[i] == lcore_id) {
1141 drain_mbuf_table(tx_q);
1144 * Inform the configuration core that we have exited the
1145 * linked list and that no devices are in use if requested.
1147 if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
1148 lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;
1151 * Process vhost devices
1153 TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
1155 if (unlikely(vdev->remove)) {
1157 vdev->ready = DEVICE_SAFE_REMOVE;
1161 if (likely(vdev->ready == DEVICE_RX))
1164 if (likely(!vdev->remove))
1165 drain_virtio_tx(vdev);
1173 * Remove a device from the specific data core linked list and from the
1174 * main linked list. Synchronization occurs through the use of the
1175 * lcore dev_removal_flag. The device is made volatile here to avoid
1176 * re-ordering of dev->remove=1, which can cause an infinite loop in the rte_pause loop.
1179 destroy_device(int vid)
1181 struct vhost_dev *vdev = NULL;
1184 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1185 if (vdev->vid == vid)
1190 /* Set the remove flag. */
1192 while (vdev->ready != DEVICE_SAFE_REMOVE) {
1196 if (builtin_net_driver)
1197 vs_vhost_net_remove(vdev);
1199 TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
1201 TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
1204 /* Set the dev_removal_flag on each lcore. */
1205 RTE_LCORE_FOREACH_SLAVE(lcore)
1206 lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;
1209 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
1210 * we can be sure that they can no longer access the device removed
1211 * from the linked lists and that the devices are no longer in use.
1213 RTE_LCORE_FOREACH_SLAVE(lcore) {
1214 while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
1218 lcore_info[vdev->coreid].device_num--;
1220 RTE_LOG(INFO, VHOST_DATA,
1221 "(%d) device has been removed from data core\n",
1228 * A new device is added to a data core. First the device is added to the main linked list
1229 * and then allocated to a specific data core.
1234 int lcore, core_add = 0;
1235 uint32_t device_num_min = num_devices;
1236 struct vhost_dev *vdev;
1238 vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
1240 RTE_LOG(INFO, VHOST_DATA,
1241 "(%d) couldn't allocate memory for vhost dev\n",
1247 if (builtin_net_driver)
1248 vs_vhost_net_setup(vdev);
1250 TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
1251 vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
1253 /* Reset the ready flag. */
1254 vdev->ready = DEVICE_MAC_LEARNING;
1257 /* Find a suitable lcore to add the device. */
1258 RTE_LCORE_FOREACH_SLAVE(lcore) {
1259 if (lcore_info[lcore].device_num < device_num_min) {
1260 device_num_min = lcore_info[lcore].device_num;
1264 vdev->coreid = core_add;
1266 TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
1268 lcore_info[vdev->coreid].device_num++;
1270 /* Disable notifications. */
1271 rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
1272 rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
1274 RTE_LOG(INFO, VHOST_DATA,
1275 "(%d) device has been added to data core %d\n",
1282 * These callbacks allow devices to be added to the data core when configuration
1283 * has been fully completed.
1285 static const struct vhost_device_ops virtio_net_device_ops =
1287 .new_device = new_device,
1288 .destroy_device = destroy_device,
1292 * This is a thread that will wake up periodically to print stats if the user has
1298 struct vhost_dev *vdev;
1299 uint64_t tx_dropped, rx_dropped;
1300 uint64_t tx, tx_total, rx, rx_total;
1301 const char clr[] = { 27, '[', '2', 'J', '\0' };
1302 const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
1305 sleep(enable_stats);
1307 /* Clear screen and move to top left */
1308 printf("%s%s\n", clr, top_left);
1309 printf("Device statistics =================================\n");
1311 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1312 tx_total = vdev->stats.tx_total;
1313 tx = vdev->stats.tx;
1314 tx_dropped = tx_total - tx;
1316 rx_total = rte_atomic64_read(&vdev->stats.rx_total_atomic);
1317 rx = rte_atomic64_read(&vdev->stats.rx_atomic);
1318 rx_dropped = rx_total - rx;
1320 printf("Statistics for device %d\n"
1321 "-----------------------\n"
1322 "TX total: %" PRIu64 "\n"
1323 "TX dropped: %" PRIu64 "\n"
1324 "TX successful: %" PRIu64 "\n"
1325 "RX total: %" PRIu64 "\n"
1326 "RX dropped: %" PRIu64 "\n"
1327 "RX successful: %" PRIu64 "\n",
1329 tx_total, tx_dropped, tx,
1330 rx_total, rx_dropped, rx);
1333 printf("===================================================\n");
1338 unregister_drivers(int socket_num)
1342 for (i = 0; i < socket_num; i++) {
1343 ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
1345 RTE_LOG(ERR, VHOST_CONFIG,
1346 "Fail to unregister vhost driver for %s.\n",
1347 socket_files + i * PATH_MAX);
1351 /* When we receive a SIGINT, unregister the vhost driver. */
1353 sigint_handler(__rte_unused int signum)
1355 /* Unregister vhost driver. */
1356 unregister_drivers(nb_sockets);
1362 * While creating an mbuf pool, one key thing is to figure out how
1363 * many mbuf entries are enough for our use. FYI, here are some
1366 * - Each rx queue would reserve @nr_rx_desc mbufs at the queue setup stage
1368 * - For each switch core (a CPU core that does the packet switching),
1369 * we also need to reserve some mbufs for receiving the packets from
1370 * the virtio Tx queue. How many are enough depends on the usage. It's
1371 * normally a simple calculation like the following:
1373 * MAX_PKT_BURST * max packet size / mbuf size
1375 * So, we definitely need to allocate more mbufs when TSO is enabled.
1377 * - Similarly, for each switch core, we should reserve @nr_rx_desc
1378 * mbufs for receiving the packets from the physical NIC device.
1380 * - We also need to make sure that, for each switch core, we have
1381 * allocated enough mbufs to fill up the mbuf cache.
1384 create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
1385 uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
1388 uint32_t nr_mbufs_per_core;
1389 uint32_t mtu = 1500;
1396 nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST /
1397 (mbuf_size - RTE_PKTMBUF_HEADROOM);
1398 nr_mbufs_per_core += nr_rx_desc;
1399 nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
1401 nr_mbufs = nr_queues * nr_rx_desc;
1402 nr_mbufs += nr_mbufs_per_core * nr_switch_core;
1403 nr_mbufs *= nr_port;
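/*
 * Worked example (an illustration, assuming MAX_PKT_BURST is 32 and the
 * default 2176-byte mbuf data size with 128-byte headroom):
 *   nr_mbufs_per_core = (1500 + 2176) * 32 / (2176 - 128) + 1024 ~= 1081
 * so with one port, 128 queues of 1024 descriptors and a few switch
 * cores, the pool holds a bit over 128K mbufs.
 */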
1405 mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
1406 nr_mbuf_cache, 0, mbuf_size,
1408 if (mbuf_pool == NULL)
1409 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
1413 * Main function, does initialisation and calls the per-lcore functions.
1416 main(int argc, char *argv[])
1418 unsigned lcore_id, core_id = 0;
1419 unsigned nb_ports, valid_num_ports;
1422 static pthread_t tid;
1423 char thread_name[RTE_MAX_THREAD_NAME_LEN];
1426 signal(SIGINT, sigint_handler);
1429 ret = rte_eal_init(argc, argv);
1431 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
1435 /* parse app arguments */
1436 ret = us_vhost_parse_args(argc, argv);
1438 rte_exit(EXIT_FAILURE, "Invalid argument\n");
1440 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1441 TAILQ_INIT(&lcore_info[lcore_id].vdev_list);
1443 if (rte_lcore_is_enabled(lcore_id))
1444 lcore_ids[core_id++] = lcore_id;
1447 if (rte_lcore_count() > RTE_MAX_LCORE)
1448 rte_exit(EXIT_FAILURE, "Not enough cores\n");
1450 /* Get the number of physical ports. */
1451 nb_ports = rte_eth_dev_count_avail();
1454 * Update the global variable num_ports and the global array ports,
1455 * and get the value of valid_num_ports according to the system port count.
1457 valid_num_ports = check_ports_num(nb_ports);
1459 if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
1460 RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
1461 "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
1466 * FIXME: here we are trying to allocate enough mbufs for
1467 * @MAX_QUEUES, but in truth we are never going to use that
1468 * many queues here. We should probably only allocate for
1469 * those queues we are actually going to use.
1471 create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
1472 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
1474 if (vm2vm_mode == VM2VM_HARDWARE) {
1475 /* Enable VT loopback to let the hardware L2 switch handle VM2VM traffic. */
1476 vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
1477 RTE_LOG(DEBUG, VHOST_CONFIG,
1478 "Enable loop back for L2 switch in vmdq.\n");
1481 /* initialize all ports */
1482 RTE_ETH_FOREACH_DEV(portid) {
1483 /* skip ports that are not enabled */
1484 if ((enabled_port_mask & (1 << portid)) == 0) {
1485 RTE_LOG(INFO, VHOST_PORT,
1486 "Skipping disabled port %d\n", portid);
1489 if (port_init(portid) != 0)
1490 rte_exit(EXIT_FAILURE,
1491 "Cannot initialize network ports\n");
1494 /* Enable stats if the user option is set. */
1496 ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
1498 rte_exit(EXIT_FAILURE,
1499 "Cannot create print-stats thread\n");
1501 /* Set thread_name to aid debugging. */
1502 snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
1503 ret = rte_thread_setname(tid, thread_name);
1505 RTE_LOG(DEBUG, VHOST_CONFIG,
1506 "Cannot set print-stats name\n");
1509 /* Launch all data cores. */
1510 RTE_LCORE_FOREACH_SLAVE(lcore_id)
1511 rte_eal_remote_launch(switch_worker, NULL, lcore_id);
1514 flags |= RTE_VHOST_USER_CLIENT;
1516 if (dequeue_zero_copy)
1517 flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
1519 /* Register vhost user driver to handle vhost messages. */
1520 for (i = 0; i < nb_sockets; i++) {
1521 char *file = socket_files + i * PATH_MAX;
1522 ret = rte_vhost_driver_register(file, flags);
1524 unregister_drivers(i);
1525 rte_exit(EXIT_FAILURE,
1526 "vhost driver register failure.\n");
1529 if (builtin_net_driver)
1530 rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);
1532 if (mergeable == 0) {
1533 rte_vhost_driver_disable_features(file,
1534 1ULL << VIRTIO_NET_F_MRG_RXBUF);
1537 if (enable_tx_csum == 0) {
1538 rte_vhost_driver_disable_features(file,
1539 1ULL << VIRTIO_NET_F_CSUM);
1542 if (enable_tso == 0) {
1543 rte_vhost_driver_disable_features(file,
1544 1ULL << VIRTIO_NET_F_HOST_TSO4);
1545 rte_vhost_driver_disable_features(file,
1546 1ULL << VIRTIO_NET_F_HOST_TSO6);
1547 rte_vhost_driver_disable_features(file,
1548 1ULL << VIRTIO_NET_F_GUEST_TSO4);
1549 rte_vhost_driver_disable_features(file,
1550 1ULL << VIRTIO_NET_F_GUEST_TSO6);
1554 rte_vhost_driver_enable_features(file,
1555 1ULL << VIRTIO_NET_F_CTRL_RX);
1558 ret = rte_vhost_driver_callback_register(file,
1559 &virtio_net_device_ops);
1561 rte_exit(EXIT_FAILURE,
1562 "failed to register vhost driver callbacks.\n");
1565 if (rte_vhost_driver_start(file) < 0) {
1566 rte_exit(EXIT_FAILURE,
1567 "failed to start vhost driver.\n");
1571 RTE_LCORE_FOREACH_SLAVE(lcore_id)
1572 rte_eal_wait_lcore(lcore_id);