/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <arpa/inet.h>
#include <errno.h>
#include <getopt.h>
#include <inttypes.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/param.h>
#include <unistd.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_virtio_net.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#include "main.h"

#define MAX_QUEUES 128

/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

#define MBUF_CACHE_SIZE	128
#define MBUF_DATA_SIZE	RTE_MBUF_DEFAULT_BUF_SIZE

#define MAX_PKT_BURST 32		/* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */

#define BURST_RX_WAIT_US 15	/* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4		/* Number of retries on RX. */

#define JUMBO_FRAME_MAX_SIZE    0x2600

/* States of virtio device. */
#define DEVICE_MAC_LEARNING	0
#define DEVICE_RX		1
#define DEVICE_SAFE_REMOVE	2

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* Max number of devices. Limited by vmdq. */
#define MAX_DEVICES 64

/* Size of buffers used for snprintfs. */
#define MAX_PRINT_BUFF 6072

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64

/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Promiscuous mode */
static uint32_t promiscuous;

/* number of devices/queues to support */
static uint32_t num_queues = 0;
static uint32_t num_devices;

static struct rte_mempool *mbuf_pool;
static int mergeable;

/* Do vlan strip on host, enabled by default */
static uint32_t vlan_strip = 1;

/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
typedef enum {
	VM2VM_DISABLED = 0,
	VM2VM_SOFTWARE = 1,
	VM2VM_HARDWARE = 2,
	VM2VM_LAST
} vm2vm_type;
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;

/* Enable stats. */
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;

/* Disable TX checksum offload */
static uint32_t enable_tx_csum;

/* Disable TSO offload */
static uint32_t enable_tso;

static int client_mode;

/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
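
/*
 * With the defaults above, a burst that finds the guest RX ring full waits
 * at most BURST_RX_RETRIES * BURST_RX_WAIT_US = 4 * 15 = 60us before the
 * packets that still do not fit are dropped.
 */
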
/* Socket file paths. Can be set by user */
static char *socket_files;
static int nb_sockets;

/* Empty VMDq configuration structure. Filled in programmatically. */
static struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		/*
		 * VLAN strip is necessary for 1G NICs such as the I350;
		 * it fixes a bug where IPv4 forwarding in the guest could
		 * not forward packets from one virtio dev to another.
		 */
		.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
	},

	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
	.rx_adv_conf = {
		/*
		 * should be overridden separately in code with
		 * appropriate values
		 */
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
};

static unsigned lcore_ids[RTE_MAX_LCORE];
static uint8_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified on the command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;

const uint16_t vlan_tags[] = {
	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
	1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};

/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

static struct vhost_dev_tailq_list vhost_dev_list =
	TAILQ_HEAD_INITIALIZER(vhost_dev_list);

static struct lcore_info lcore_info[RTE_MAX_LCORE];

/* Used for queueing bursts of TX packets. */
struct mbuf_table {
	unsigned len;
	unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];

#define MBUF_TABLE_DRAIN_TSC	((rte_get_tsc_hz() + US_PER_S - 1) \
				 / US_PER_S * BURST_TX_DRAIN_US)
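
/*
 * Example: with a 2.4 GHz TSC, MBUF_TABLE_DRAIN_TSC evaluates to
 * (2400000000 + 999999) / 1000000 * 100 = 240000 cycles, i.e. the intended
 * ~100us. Rounding the cycles-per-us term up keeps the drain interval
 * non-zero even for TSC frequencies that are not a multiple of 1 MHz.
 */
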
/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
	struct rte_eth_vmdq_rx_conf conf;
	struct rte_eth_vmdq_rx_conf *def_conf =
		&vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
	unsigned i;

	memset(&conf, 0, sizeof(conf));
	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
	conf.nb_pool_maps = num_devices;
	conf.enable_loop_back = def_conf->enable_loop_back;
	conf.rx_mode = def_conf->rx_mode;

	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << i);
	}

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
	return 0;
}
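
/*
 * For instance, with num_devices = 8 the loop above yields a one-to-one
 * map: VLAN 1000 -> pool 0 (mask 0x01), VLAN 1001 -> pool 1 (mask 0x02),
 * ..., VLAN 1007 -> pool 7 (mask 0x80), so each virtio device owns one
 * VMDq pool.
 */
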
/*
 * Validate the device number according to the max pool number obtained from
 * dev_info. If the device number is invalid, give an error message and
 * return -1. Each device must have its own pool.
 */
static inline int
validate_num_devices(uint32_t max_nb_devices)
{
	if (num_devices > max_nb_devices) {
		RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
		return -1;
	}
	return 0;
}

/*
 * Initialises a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_txconf *txconf;
	int16_t rx_rings, tx_rings;
	uint16_t rx_ring_size, tx_ring_size;
	int retval;
	uint16_t q;

	/* The max pool number from dev_info is used to validate the pool number specified on the command line */
	rte_eth_dev_info_get(port, &dev_info);

	if (dev_info.max_rx_queues > MAX_QUEUES) {
		rte_exit(EXIT_FAILURE,
			"please define MAX_QUEUES no less than %u in %s\n",
			dev_info.max_rx_queues, __FILE__);
	}

	rxconf = &dev_info.default_rxconf;
	txconf = &dev_info.default_txconf;
	rxconf->rx_drop_en = 1;

	/* Enable vlan offload */
	txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;

	/* Configure the number of supported virtio devices based on VMDQ limits */
	num_devices = dev_info.max_vmdq_pools;

	rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
	tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
	tx_rings = (uint16_t)rte_lcore_count();

	retval = validate_num_devices(MAX_DEVICES);
	if (retval < 0)
		return retval;

	/* Get port configuration. */
	retval = get_eth_conf(&port_conf, num_devices);
	if (retval < 0)
		return retval;
	/* NIC queues are divided into pf queues and vmdq queues. */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_devices * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base  = dev_info.vmdq_pool_base;
	printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
		num_pf_queues, num_devices, queues_per_pool);
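
	/*
	 * For example, an 82599 (ixgbe) port in VMDq-only mode typically
	 * reports max_rx_queues = 128, vmdq_queue_num = 128 and
	 * max_vmdq_pools = 64, i.e. num_pf_queues = 0 and 2 queues per
	 * pool; NICs with a dedicated PF queue range (e.g. i40e) report a
	 * non-zero vmdq_queue_base instead.
	 */
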
	if (port >= rte_eth_dev_count())
		return -1;

	if (enable_tx_csum == 0)
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_CSUM);

	if (enable_tso == 0) {
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4);
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO6);
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_GUEST_TSO4);
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_GUEST_TSO6);
	}

	rx_rings = (uint16_t)dev_info.max_rx_queues;
	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
			port, strerror(-retval));
		return retval;
	}

	/* Setup the queues. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
						rte_eth_dev_socket_id(port),
						rxconf,
						mbuf_pool);
		if (retval < 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup rx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));
			return retval;
		}
	}
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
						rte_eth_dev_socket_id(port),
						txconf);
		if (retval < 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup tx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));
			return retval;
		}
	}

	/* Start the device. */
	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
			port, strerror(-retval));
		return retval;
	}

	if (promiscuous)
		rte_eth_promiscuous_enable(port);

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	return 0;
}

/*
 * Set socket file path.
 */
static int
us_vhost_parse_socket_path(const char *q_arg)
{
	/* parse socket path string */
	if (strnlen(q_arg, PATH_MAX) >= PATH_MAX)
		return -1;

	socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
	snprintf(socket_files + nb_sockets * PATH_MAX, PATH_MAX, "%s", q_arg);
	nb_sockets++;

	return 0;
}

/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	errno = 0;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	return pm;
}

/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
	char *end = NULL;
	unsigned long num;

	errno = 0;

	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	if (num > max_valid_value)
		return -1;

	return num;
}

/*
 * Display usage
 */
static void
us_vhost_usage(const char *prgname)
{
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
	"		--vm2vm [0|1|2]\n"
	"		--rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
	"		--socket-file <path>\n"
	"		-p PORTMASK: Set mask for ports to be used by application\n"
	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
	"		--rx-retry [0|1]: disable/enable(default) retries on RX. Enable retry if destination queue is full\n"
	"		--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. Only effective if RX retries are enabled\n"
	"		--rx-retry-num [0-N]: the number of retries on RX. Only effective if RX retries are enabled\n"
	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
	"		--vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n"
	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
	"		--socket-file: The path of the socket file.\n"
	"		--tx-csum [0|1]: disable/enable TX checksum offload.\n"
	"		--tso [0|1]: disable/enable TCP segmentation offload.\n"
	"		--client: register a vhost-user socket as client mode.\n",
	       prgname);
}
/*
 * Parse the arguments given in the command line of the application.
 */
static int
us_vhost_parse_args(int argc, char **argv)
{
	int opt, ret;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"vm2vm", required_argument, NULL, 0},
		{"rx-retry", required_argument, NULL, 0},
		{"rx-retry-delay", required_argument, NULL, 0},
		{"rx-retry-num", required_argument, NULL, 0},
		{"mergeable", required_argument, NULL, 0},
		{"vlan-strip", required_argument, NULL, 0},
		{"stats", required_argument, NULL, 0},
		{"socket-file", required_argument, NULL, 0},
		{"tx-csum", required_argument, NULL, 0},
		{"tso", required_argument, NULL, 0},
		{"client", no_argument, &client_mode, 1},
		{NULL, 0, 0, 0},
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:P",
			long_option, &option_index)) != EOF) {
		switch (opt) {
		/* Portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
				us_vhost_usage(prgname);
				return -1;
			}
			break;

		case 'P':
			promiscuous = 1;
			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
				ETH_VMDQ_ACCEPT_BROADCAST |
				ETH_VMDQ_ACCEPT_MULTICAST;
			rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX);
			break;

		case 0:
			/* Enable/disable vm2vm comms. */
			if (!strncmp(long_option[option_index].name, "vm2vm",
				MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for "
						"vm2vm [0|1|2]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				vm2vm_mode = (vm2vm_type)ret;
			}

			/* Enable/disable retries on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				enable_retry = ret;
			}

			/* Enable/disable TX checksum offload. */
			if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				enable_tx_csum = ret;
			}

			/* Enable/disable TSO offload. */
			if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				enable_tso = ret;
			}

			/* Specify the retries delay time (in microseconds) on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				burst_rx_delay_time = ret;
			}

			/* Specify the retries number on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				burst_rx_retry_num = ret;
			}

			/* Enable/disable RX mergeable buffers. */
			if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				mergeable = !!ret;
				if (ret) {
					vmdq_conf_default.rxmode.jumbo_frame = 1;
					vmdq_conf_default.rxmode.max_rx_pkt_len
						= JUMBO_FRAME_MAX_SIZE;
				}
			}

			/* Enable/disable RX VLAN strip on host. */
			if (!strncmp(long_option[option_index].name,
					"vlan-strip", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for VLAN strip [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				vlan_strip = !!ret;
				vmdq_conf_default.rxmode.hw_vlan_strip =
					vlan_strip;
			}

			/* Enable/disable stats. */
			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for stats [0..N]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				enable_stats = ret;
			}

			/* Set socket file path. */
			if (!strncmp(long_option[option_index].name,
						"socket-file", MAX_LONG_OPT_SZ)) {
				if (us_vhost_parse_socket_path(optarg) == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for socket name (Max %d characters)\n",
					PATH_MAX);
					us_vhost_usage(prgname);
					return -1;
				}
			}

			break;

		/* Invalid option - print options. */
		default:
			us_vhost_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint8_t)i;
	}

	if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports,
			MAX_SUP_PORTS);
		return -1;
	}

	return 0;
}

/*
 * Update the global var NUM_PORTS and array PORTS according to the number
 * of system ports, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (ports[portid] >= nb_ports) {
			RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
				ports[portid], (nb_ports - 1));
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}

static inline struct vhost_dev *__attribute__((always_inline))
find_vhost_dev(struct ether_addr *mac)
{
	struct vhost_dev *vdev;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->ready == DEVICE_RX &&
		    is_same_ether_addr(mac, &vdev->mac_address))
			return vdev;
	}

	return NULL;
}

/*
 * This function learns the MAC address of the device and registers this along with a
 * vlan tag to a VMDQ.
 */
static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct ether_hdr *pkt_hdr;
	int i, ret;

	/* Learn MAC address of guest device from packet */
	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	if (find_vhost_dev(&pkt_hdr->s_addr)) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) device is using a registered MAC!\n",
			vdev->vid);
		return -1;
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];

	/* vlan_tag currently uses the device_id. */
	vdev->vlan_tag = vlan_tags[vdev->vid];

	/* Print out VMDQ registration info. */
	RTE_LOG(INFO, VHOST_DATA,
		"(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
		vdev->vid,
		vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
		vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
		vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
		vdev->vlan_tag);

	/* Register the MAC address. */
	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
				(uint32_t)vdev->vid + vmdq_pool_base);
	if (ret)
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) failed to add device MAC address to VMDQ\n",
			vdev->vid);

	/* Enable stripping of the vlan tag as we handle routing. */
	if (vlan_strip)
		rte_eth_dev_set_vlan_strip_on_queue(ports[0],
			(uint16_t)vdev->vmdq_rx_q, 1);

	/* Set device as ready for RX. */
	vdev->ready = DEVICE_RX;

	return 0;
}

/*
 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
 * queue before disabling RX on the device.
 */
static inline void
unlink_vmdq(struct vhost_dev *vdev)
{
	unsigned i = 0;
	unsigned rx_count;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	if (vdev->ready == DEVICE_RX) {
		/* clear MAC and VLAN settings */
		rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
		for (i = 0; i < 6; i++)
			vdev->mac_address.addr_bytes[i] = 0;

		vdev->vlan_tag = 0;

		/* Clear out the receive buffers */
		rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

		while (rx_count) {
			for (i = 0; i < rx_count; i++)
				rte_pktmbuf_free(pkts_burst[i]);

			rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
		}

		vdev->ready = DEVICE_MAC_LEARNING;
	}
}

static inline void __attribute__((always_inline))
virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
	    struct rte_mbuf *m)
{
	uint16_t ret;

	ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
	if (enable_stats) {
		rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
		rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
		src_vdev->stats.tx_total++;
		src_vdev->stats.tx += ret;
	}
}

/*
 * Check if the packet destination MAC address is for a local device. If so then put
 * the packet on that device's RX queue. If not then return.
 */
static inline int __attribute__((always_inline))
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct ether_hdr *pkt_hdr;
	struct vhost_dev *dst_vdev;

	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
	if (!dst_vdev)
		return -1;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC are the same. Dropping packet.\n",
			vdev->vid);
		return 0;
	}

	RTE_LOG(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is local\n", dst_vdev->vid);

	if (unlikely(dst_vdev->remove)) {
		RTE_LOG(DEBUG, VHOST_DATA,
			"(%d) device is marked for removal\n", dst_vdev->vid);
		return 0;
	}

	virtio_xmit(dst_vdev, vdev, m);
	return 0;
}

/*
 * Check if the destination MAC of a packet is one local VM,
 * and get its vlan tag, and offset if it is.
 */
static inline int __attribute__((always_inline))
find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
	uint32_t *offset, uint16_t *vlan_tag)
{
	struct vhost_dev *dst_vdev;
	struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
	if (!dst_vdev)
		return 0;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC are the same. Dropping packet.\n",
			vdev->vid);
		return -1;
	}

	/*
	 * HW vlan strip reduces the packet length by the size of the
	 * vlan tag, so the length needs to be restored by adding the
	 * tag size back.
	 */
	*offset  = VLAN_HLEN;
	*vlan_tag = vlan_tags[vdev->vid];

	RTE_LOG(DEBUG, VHOST_DATA,
		"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
		vdev->vid, dst_vdev->vid, *vlan_tag);

	return 0;
}

static uint16_t
get_psd_sum(void *l3_hdr, uint64_t ol_flags)
{
	if (ol_flags & PKT_TX_IPV4)
		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
	else /* assume ethertype is ETHER_TYPE_IPv6 */
		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}
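
/*
 * Note: for TSO the NIC expects the TCP checksum field to be seeded with
 * the pseudo-header checksum only; rte_ipv4_phdr_cksum() and
 * rte_ipv6_phdr_cksum() leave the L4 length out of that sum when
 * PKT_TX_TCP_SEG is set in ol_flags, and the hardware then finalises the
 * checksum of every segment it emits.
 */
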
static void virtio_tx_offload(struct rte_mbuf *m)
{
	void *l3_hdr;
	struct ipv4_hdr *ipv4_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	l3_hdr = (char *)eth_hdr + m->l2_len;

	if (m->ol_flags & PKT_TX_IPV4) {
		ipv4_hdr = l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		m->ol_flags |= PKT_TX_IP_CKSUM;
	}

	tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
	tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}

static inline void
free_pkts(struct rte_mbuf **pkts, uint16_t n)
{
	while (n)
		rte_pktmbuf_free(pkts[--n]);
}

static inline void __attribute__((always_inline))
do_drain_mbuf_table(struct mbuf_table *tx_q)
{
	uint16_t count;

	count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
				 tx_q->m_table, tx_q->len);
	if (unlikely(count < tx_q->len))
		free_pkts(&tx_q->m_table[count], tx_q->len - count);

	tx_q->len = 0;
}

/*
 * This function routes the TX packet to the correct interface. This
 * may be a local device or the physical port.
 */
static inline void __attribute__((always_inline))
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
	struct mbuf_table *tx_q;
	uint32_t offset = 0;
	const uint16_t lcore_id = rte_lcore_id();
	struct ether_hdr *nh;

	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
		struct vhost_dev *vdev2;

		TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
			virtio_xmit(vdev2, vdev, m);
		}
		goto queue2nic;
	}

	/* check if destination is a local VM */
	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
		rte_pktmbuf_free(m);
		return;
	}

	if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
		if (unlikely(find_local_dest(vdev, m, &offset,
					     &vlan_tag) != 0)) {
			rte_pktmbuf_free(m);
			return;
		}
	}

	RTE_LOG(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is external\n", vdev->vid);

queue2nic:

	/* Add packet to the port tx queue */
	tx_q = &lcore_tx_queue[lcore_id];

	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
		/* Guest has inserted the vlan tag. */
		struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
		uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
		if ((vm2vm_mode == VM2VM_HARDWARE) &&
			(vh->vlan_tci != vlan_tag_be))
			vh->vlan_tci = vlan_tag_be;
	} else {
		m->ol_flags |= PKT_TX_VLAN_PKT;

		/*
		 * Find the right seg to adjust the data len when offset is
		 * bigger than tail room size.
		 */
		if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
			if (likely(offset <= rte_pktmbuf_tailroom(m)))
				m->data_len += offset;
			else {
				struct rte_mbuf *seg = m;

				while ((seg->next != NULL) &&
					(offset > rte_pktmbuf_tailroom(seg)))
					seg = seg->next;

				seg->data_len += offset;
			}
			m->pkt_len += offset;
		}

		m->vlan_tci = vlan_tag;
	}

	if (m->ol_flags & PKT_TX_TCP_SEG)
		virtio_tx_offload(m);

	tx_q->m_table[tx_q->len++] = m;
	if (enable_stats) {
		vdev->stats.tx_total++;
		vdev->stats.tx++;
	}

	if (unlikely(tx_q->len == MAX_PKT_BURST))
		do_drain_mbuf_table(tx_q);
}

static inline void __attribute__((always_inline))
drain_mbuf_table(struct mbuf_table *tx_q)
{
	static uint64_t prev_tsc;
	uint64_t cur_tsc;

	if (tx_q->len == 0)
		return;

	cur_tsc = rte_rdtsc();
	if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
		prev_tsc = cur_tsc;

		RTE_LOG(DEBUG, VHOST_DATA,
			"TX queue drained after timeout with burst size %u\n",
			tx_q->len);
		do_drain_mbuf_table(tx_q);
	}
}

static inline void __attribute__((always_inline))
drain_eth_rx(struct vhost_dev *vdev)
{
	uint16_t rx_count, enqueue_count;
	struct rte_mbuf *pkts[MAX_PKT_BURST];

	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
				    pkts, MAX_PKT_BURST);
	if (!rx_count)
		return;

	/*
	 * When "enable_retry" is set, here we wait and retry when there
	 * are not enough free slots in the queue to hold @rx_count
	 * packets, to diminish packet loss.
	 */
	if (enable_retry &&
	    unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
			VIRTIO_RXQ))) {
		uint32_t retry;

		for (retry = 0; retry < burst_rx_retry_num; retry++) {
			rte_delay_us(burst_rx_delay_time);
			if (rx_count <= rte_vhost_avail_entries(vdev->vid,
					VIRTIO_RXQ))
				break;
		}
	}

	enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
						pkts, rx_count);
	if (enable_stats) {
		rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
		rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
	}

	free_pkts(pkts, rx_count);
}

static inline void __attribute__((always_inline))
drain_virtio_tx(struct vhost_dev *vdev)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t count;
	uint16_t i;

	count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, mbuf_pool,
					pkts, MAX_PKT_BURST);

	/* setup VMDq for the first packet */
	if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
		if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1) {
			free_pkts(pkts, count);
			return;
		}
	}

	for (i = 0; i < count; ++i)
		virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}

/*
 * Main function of vhost-switch. It basically does:
 *
 * for each vhost device {
 *    - drain_eth_rx()
 *
 *      which drains the host eth RX queue linked to the vhost device,
 *      and delivers all of the packets to the guest virtio RX ring
 *      associated with this vhost device.
 *
 *    - drain_virtio_tx()
 *
 *      which drains the guest virtio TX queue and delivers all of the
 *      packets to the target, which could be another vhost device, or
 *      the physical eth dev. The routing is done in virtio_tx_route().
 * }
 */
static int
switch_worker(void *arg __rte_unused)
{
	unsigned i;
	unsigned lcore_id = rte_lcore_id();
	struct vhost_dev *vdev;
	struct mbuf_table *tx_q;

	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);

	tx_q = &lcore_tx_queue[lcore_id];
	for (i = 0; i < rte_lcore_count(); i++) {
		if (lcore_ids[i] == lcore_id) {
			tx_q->txq_id = i;
			break;
		}
	}

	while (1) {
		drain_mbuf_table(tx_q);

		/*
		 * Inform the configuration core that we have exited the
		 * linked list and that no devices are in use if requested.
		 */
		if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
			lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;

		/*
		 * Process vhost devices
		 */
		TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
			      lcore_vdev_entry) {
			if (unlikely(vdev->remove)) {
				unlink_vmdq(vdev);
				vdev->ready = DEVICE_SAFE_REMOVE;
				continue;
			}

			if (likely(vdev->ready == DEVICE_RX))
				drain_eth_rx(vdev);

			if (likely(!vdev->remove))
				drain_virtio_tx(vdev);
		}
	}

	return 0;
}

/*
 * Remove a device from the specific data core linked list and from the
 * main linked list. Synchronization occurs through the use of the
 * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
 * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
 */
static void
destroy_device(int vid)
{
	struct vhost_dev *vdev = NULL;
	int lcore;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->vid == vid)
			break;
	}
	if (!vdev)
		return;

	/* set the remove flag. */
	vdev->remove = 1;
	while (vdev->ready != DEVICE_SAFE_REMOVE) {
		rte_pause();
	}

	TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
		     lcore_vdev_entry);
	TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);

	/* Set the dev_removal_flag on each lcore. */
	RTE_LCORE_FOREACH_SLAVE(lcore)
		lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;

	/*
	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
	 * we can be sure that they can no longer access the device removed
	 * from the linked lists and that the devices are no longer in use.
	 */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
			rte_pause();
	}

	lcore_info[vdev->coreid].device_num--;

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been removed from data core\n",
		vdev->vid);

	rte_free(vdev);
}

/*
 * A new device is added to a data core. First the device is added to the main linked list
 * and then allocated to a specific data core.
 */
static int
new_device(int vid)
{
	int lcore, core_add = 0;
	uint32_t device_num_min = num_devices;
	struct vhost_dev *vdev;

	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
	if (vdev == NULL) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%d) couldn't allocate memory for vhost dev\n",
			vid);
		return -1;
	}
	vdev->vid = vid;

	TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
	vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;

	/* reset ready flag */
	vdev->ready = DEVICE_MAC_LEARNING;
	vdev->remove = 0;

	/* Find a suitable lcore to add the device. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		if (lcore_info[lcore].device_num < device_num_min) {
			device_num_min = lcore_info[lcore].device_num;
			core_add = lcore;
		}
	}
	vdev->coreid = core_add;

	TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
			  lcore_vdev_entry);
	lcore_info[vdev->coreid].device_num++;

	/* Disable notifications. */
	rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
	rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been added to data core %d\n",
		vid, vdev->coreid);

	return 0;
}

/*
 * These callbacks allow devices to be added to the data core when configuration
 * has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
	.new_device =  new_device,
	.destroy_device = destroy_device,
};

/*
 * This is a thread that wakes up periodically to print stats if the user has
 * enabled them.
 */
static void
print_stats(void)
{
	struct vhost_dev *vdev;
	uint64_t tx_dropped, rx_dropped;
	uint64_t tx, tx_total, rx, rx_total;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	while (1) {
		sleep(enable_stats);

		/* Clear screen and move to top left */
		printf("%s%s\n", clr, top_left);
		printf("Device statistics =================================\n");

		TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
			tx_total   = vdev->stats.tx_total;
			tx         = vdev->stats.tx;
			tx_dropped = tx_total - tx;

			rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
			rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
			rx_dropped = rx_total - rx;

			printf("Statistics for device %d\n"
				"-----------------------\n"
				"TX total:              %" PRIu64 "\n"
				"TX dropped:            %" PRIu64 "\n"
				"TX successful:         %" PRIu64 "\n"
				"RX total:              %" PRIu64 "\n"
				"RX dropped:            %" PRIu64 "\n"
				"RX successful:         %" PRIu64 "\n",
				vdev->vid,
				tx_total, tx_dropped, tx,
				rx_total, rx_dropped, rx);
		}

		printf("===================================================\n");
	}
}

static void
unregister_drivers(int socket_num)
{
	int i, ret;

	for (i = 0; i < socket_num; i++) {
		ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
		if (ret != 0)
			RTE_LOG(ERR, VHOST_CONFIG,
				"Failed to unregister vhost driver for %s.\n",
				socket_files + i * PATH_MAX);
	}
}

/* When we receive a SIGINT, unregister all vhost drivers */
static void
sigint_handler(__rte_unused int signum)
{
	/* Unregister vhost driver. */
	unregister_drivers(nb_sockets);

	exit(0);
}

/*
 * While creating an mbuf pool, one key thing is to figure out how
 * many mbuf entries are enough for our use. Here are some guidelines:
 *
 * - Each rx queue reserves @nr_rx_desc mbufs at queue setup stage.
 *
 * - For each switch core (a CPU core that does the packet switching),
 *   we also need to reserve some mbufs for receiving the packets from
 *   the virtio Tx queue. How many is enough depends on the usage; it is
 *   normally a simple calculation like the following:
 *
 *       MAX_PKT_BURST * max packet size / mbuf size
 *
 *   So we definitely need to allocate more mbufs when TSO is enabled.
 *
 * - Similarly, for each switching core, we should reserve @nr_rx_desc
 *   mbufs for receiving the packets from the physical NIC device.
 *
 * - We also need to make sure that, for each switch core, we have
 *   allocated enough mbufs to fill up the mbuf cache.
 */
static void
create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
	uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
{
	uint32_t nr_mbufs;
	uint32_t nr_mbufs_per_core;
	uint32_t mtu = 1500;

	if (mergeable)
		mtu = 9000;
	if (enable_tso)
		mtu = 64 * 1024;

	nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
			(mbuf_size - RTE_PKTMBUF_HEADROOM);
	nr_mbufs_per_core += nr_rx_desc;
	nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);

	nr_mbufs  = nr_queues * nr_rx_desc;
	nr_mbufs += nr_mbufs_per_core * nr_switch_core;
	nr_mbufs *= nr_port;

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
					    nr_mbuf_cache, 0, mbuf_size,
					    rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
}
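
/*
 * Worked example with the values main() passes below, assuming the default
 * RTE_PKTMBUF_HEADROOM of 128: mbuf_size = RTE_MBUF_DEFAULT_BUF_SIZE =
 * 2048 + 128, mtu = 1500, nr_rx_desc = 1024 and nr_queues = MAX_QUEUES = 128:
 *
 *   nr_mbufs_per_core = (1500 + 2176) * 32 / 2048 + 1024 = 1081
 *   nr_mbufs          = 128 * 1024 + 1081 * nr_switch_core   (per port)
 *
 * so the per-queue reservation (131072 mbufs) dominates unless TSO raises
 * the mtu term to 64KB.
 */
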
/*
 * Main function, does initialisation and calls the per-lcore functions. The CUSE
 * device is also registered here to handle the IOCTLs.
 */
int
main(int argc, char *argv[])
{
	unsigned lcore_id, core_id = 0;
	unsigned nb_ports, valid_num_ports;
	int ret, i;
	uint8_t portid;
	static pthread_t tid;
	char thread_name[RTE_MAX_THREAD_NAME_LEN];
	uint64_t flags = 0;

	signal(SIGINT, sigint_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = us_vhost_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid argument\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		TAILQ_INIT(&lcore_info[lcore_id].vdev_list);

		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;
	}

	if (rte_lcore_count() > RTE_MAX_LCORE)
		rte_exit(EXIT_FAILURE, "Not enough cores\n");

	/* Get the number of physical ports. */
	nb_ports = rte_eth_dev_count();

	/*
	 * Update the global var NUM_PORTS and global array PORTS,
	 * and get the value of VALID_NUM_PORTS according to the number
	 * of system ports.
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports,
			MAX_SUP_PORTS);
		return -1;
	}

	/*
	 * FIXME: here we are trying to allocate mbufs big enough for
	 * @MAX_QUEUES, but the truth is we're never going to use that
	 * many queues here. We probably should only do allocation for
	 * those queues we are going to use.
	 */
	create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
			 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);

	if (vm2vm_mode == VM2VM_HARDWARE) {
		/* Enable VT loop back to let the L2 switch do it. */
		vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
		RTE_LOG(DEBUG, VHOST_CONFIG,
			"Enable loop back for L2 switch in vmdq.\n");
	}

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			RTE_LOG(INFO, VHOST_PORT,
				"Skipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid) != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot initialize network ports\n");
	}

	/* Enable stats if the user option is set. */
	if (enable_stats) {
		ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot create print-stats thread\n");

		/* Set thread_name for aid in debugging. */
		snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
		ret = rte_thread_setname(tid, thread_name);
		if (ret != 0)
			RTE_LOG(DEBUG, VHOST_CONFIG,
				"Cannot set print-stats name\n");
	}

	/* Launch all data cores. */
	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_remote_launch(switch_worker, NULL, lcore_id);

	if (mergeable == 0)
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);

	if (client_mode)
		flags |= RTE_VHOST_USER_CLIENT;

	/* Register vhost user driver to handle vhost messages. */
	for (i = 0; i < nb_sockets; i++) {
		ret = rte_vhost_driver_register
			(socket_files + i * PATH_MAX, flags);
		if (ret != 0) {
			unregister_drivers(i);
			rte_exit(EXIT_FAILURE,
				"vhost driver register failure.\n");
		}
	}

	rte_vhost_driver_callback_register(&virtio_net_device_ops);

	/* Start CUSE session. */
	rte_vhost_driver_session_start();
	return 0;
}
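
/*
 * Each socket registered above is a vhost-user server endpoint (--client
 * reverses the roles). A guest can attach to it with QEMU options along
 * these lines (paths and ids are illustrative):
 *
 *   -chardev socket,id=char0,path=/tmp/sock0
 *   -netdev type=vhost-user,id=net0,chardev=char0
 *   -device virtio-net-pci,netdev=net0
 *
 * together with shared, hugepage-backed guest memory, which vhost-user
 * requires to map the guest's virtio rings.
 */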