/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <sys/eventfd.h>
#include <sys/param.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_vhost.h>
#include <rte_pause.h>
#define MAX_QUEUES 128

/* The maximum number of external ports supported. */
#define MAX_SUP_PORTS 1

#define MBUF_CACHE_SIZE 128
#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE

#define BURST_TX_DRAIN_US 100 /* TX drain every ~100 us */

#define BURST_RX_WAIT_US 15 /* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4  /* Number of retries on RX. */
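/*
 * Worked out from the two constants above: in the worst case a full Rx
 * burst waits up to BURST_RX_WAIT_US * BURST_RX_RETRIES = 15 us * 4 = 60 us
 * for the guest to free ring slots before the packets are dropped.
 */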
#define JUMBO_FRAME_MAX_SIZE 0x2600
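/*
 * 0x2600 = 9728 bytes, i.e. room for a common 9000-byte jumbo payload
 * plus Ethernet/VLAN header overhead.
 */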
/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
#define DEVICE_RX 1
#define DEVICE_SAFE_REMOVE 2

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* Max number of devices. Limited by VMDq. */
#define MAX_DEVICES 64

/* Size of buffers used for snprintfs. */
#define MAX_PRINT_BUFF 6072

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64

/* Mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Promiscuous mode */
static uint32_t promiscuous;

/* Number of devices/queues to support */
static uint32_t num_queues = 0;
static uint32_t num_devices;

static struct rte_mempool *mbuf_pool;
/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
typedef enum {
    VM2VM_DISABLED = 0,
    VM2VM_SOFTWARE = 1,
    VM2VM_HARDWARE = 2,
    VM2VM_LAST
} vm2vm_type;
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
/* Enable stats. */
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;

/* Enable/disable TX checksum offload. */
static uint32_t enable_tx_csum;

/* Enable/disable TSO offload. */
static uint32_t enable_tso;

static int client_mode;
static int dequeue_zero_copy;

static int builtin_net_driver;

/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;

/* Socket file paths. Can be set by the user. */
static char *socket_files;
static int nb_sockets;
/* Empty VMDq configuration structure. Filled in programmatically. */
static struct rte_eth_conf vmdq_conf_default = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_VMDQ_ONLY,
        /*
         * VLAN strip is necessary for 1G NICs such as the I350;
         * it fixes a bug where IPv4 forwarding in the guest could
         * not forward packets from one virtio device to another.
         */
        .offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
                 DEV_RX_OFFLOAD_VLAN_STRIP),
    },

    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
        .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
                 DEV_TX_OFFLOAD_TCP_CKSUM |
                 DEV_TX_OFFLOAD_VLAN_INSERT |
                 DEV_TX_OFFLOAD_MULTI_SEGS |
                 DEV_TX_OFFLOAD_TCP_TSO),
    },
    .rx_adv_conf = {
        /*
         * should be overridden separately in code with
         * appropriate values
         */
        .vmdq_rx_conf = {
            .nb_queue_pools = ETH_8_POOLS,
            .enable_default_pool = 0,
            .pool_map = {{0, 0},},
        },
    },
};
static unsigned lcore_ids[RTE_MAX_LCORE];
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified on the command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;
const uint16_t vlan_tags[] = {
    1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
    1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
    1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
    1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
    1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
    1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
    1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
    1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};
/* Ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

static struct vhost_dev_tailq_list vhost_dev_list =
    TAILQ_HEAD_INITIALIZER(vhost_dev_list);

static struct lcore_info lcore_info[RTE_MAX_LCORE];

/* Used for queueing bursts of TX packets. */
struct mbuf_table {
    unsigned len;
    unsigned txq_id;
    struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];

#define MBUF_TABLE_DRAIN_TSC ((rte_get_tsc_hz() + US_PER_S - 1) \
                  / US_PER_S * BURST_TX_DRAIN_US)
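/*
 * Worked example: with a 2 GHz TSC, MBUF_TABLE_DRAIN_TSC evaluates to
 * (2e9 + 1e6 - 1) / 1e6 * 100 ~= 200,000 cycles, i.e. one forced drain
 * of a partially filled Tx table roughly every 100 us.
 */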
/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
    struct rte_eth_vmdq_rx_conf conf;
    struct rte_eth_vmdq_rx_conf *def_conf =
        &vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
    unsigned i;

    memset(&conf, 0, sizeof(conf));
    conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
    conf.nb_pool_maps = num_devices;
    conf.enable_loop_back = def_conf->enable_loop_back;
    conf.rx_mode = def_conf->rx_mode;

    for (i = 0; i < conf.nb_pool_maps; i++) {
        conf.pool_map[i].vlan_id = vlan_tags[i];
        conf.pool_map[i].pools = (1UL << i);
    }
    (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
    (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
           sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));

    return 0;
}
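/*
 * For illustration, with num_devices = 8 the loop above produces
 * pool_map[0] = {1000, 0x01}, pool_map[1] = {1001, 0x02}, ...,
 * pool_map[7] = {1007, 0x80}: one VLAN tag and one dedicated pool bit
 * per virtio device.
 */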
/*
 * Validate the device count against the max pool number obtained from
 * dev_info. If the device count is invalid, print an error message and
 * return -1. Each device must have its own pool.
 */
static inline int
validate_num_devices(uint32_t max_nb_devices)
{
    if (num_devices > max_nb_devices) {
        RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
        return -1;
    }

    return 0;
}
/*
 * Initialises a given port using global settings, with the Rx buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint16_t port)
{
    struct rte_eth_dev_info dev_info;
    struct rte_eth_conf port_conf;
    struct rte_eth_rxconf *rxconf;
    struct rte_eth_txconf *txconf;
    int16_t rx_rings, tx_rings;
    uint16_t rx_ring_size, tx_ring_size;
    int retval;
    uint16_t q;

    /* The max pool number from dev_info will be used to validate the pool number specified on the command line. */
    rte_eth_dev_info_get(port, &dev_info);

    rxconf = &dev_info.default_rxconf;
    txconf = &dev_info.default_txconf;
    rxconf->rx_drop_en = 1;

    /* Configure the number of supported virtio devices based on VMDQ limits. */
    num_devices = dev_info.max_vmdq_pools;

    rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
    tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
    /*
     * When dequeue zero copy is enabled, the guest Tx used vring will be
     * updated only when the corresponding mbuf is freed. Thus, the
     * nb_tx_desc (tx_ring_size here) must be small enough so that the
     * driver will hit the free threshold easily and free mbufs in a
     * timely manner. Otherwise, the guest Tx vring would be starved.
     */
    if (dequeue_zero_copy)
        tx_ring_size = 64;

    tx_rings = (uint16_t)rte_lcore_count();
    retval = validate_num_devices(MAX_DEVICES);
    if (retval < 0)
        return retval;

    /* Get port configuration. */
    retval = get_eth_conf(&port_conf, num_devices);
    if (retval < 0)
        return retval;
    /* NIC queues are divided into pf queues and vmdq queues. */
    num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
    queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
    num_vmdq_queues = num_devices * queues_per_pool;
    num_queues = num_pf_queues + num_vmdq_queues;
    vmdq_queue_base = dev_info.vmdq_queue_base;
    vmdq_pool_base = dev_info.vmdq_pool_base;
    printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
        num_pf_queues, num_devices, queues_per_pool);
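    /*
     * A hypothetical example of the partitioning above: a NIC reporting
     * max_rx_queues = 130, vmdq_queue_num = 128 and max_vmdq_pools = 64
     * yields num_pf_queues = 2 and queues_per_pool = 2; with
     * num_devices = 64, num_vmdq_queues = 128 and num_queues = 130.
     */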
    if (!rte_eth_dev_is_valid_port(port))
        return -1;

    rx_rings = (uint16_t)dev_info.max_rx_queues;
    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
        port_conf.txmode.offloads |=
            DEV_TX_OFFLOAD_MBUF_FAST_FREE;
    /* Configure ethernet device. */
    retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
    if (retval != 0) {
        RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
            port, strerror(-retval));
        return retval;
    }

    retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
        &tx_ring_size);
    if (retval != 0) {
        RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
            "for port %u: %s.\n", port, strerror(-retval));
        return retval;
    }
    if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
        RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
            "for Rx queues on port %u.\n", port);
        return -1;
    }
    /* Setup the queues. */
    rxconf->offloads = port_conf.rxmode.offloads;
    for (q = 0; q < rx_rings; q++) {
        retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
                        rte_eth_dev_socket_id(port),
                        rxconf,
                        mbuf_pool);
        if (retval < 0) {
            RTE_LOG(ERR, VHOST_PORT,
                "Failed to setup rx queue %u of port %u: %s.\n",
                q, port, strerror(-retval));
            return retval;
        }
    }
    txconf->offloads = port_conf.txmode.offloads;
    for (q = 0; q < tx_rings; q++) {
        retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
                        rte_eth_dev_socket_id(port),
                        txconf);
        if (retval < 0) {
            RTE_LOG(ERR, VHOST_PORT,
                "Failed to setup tx queue %u of port %u: %s.\n",
                q, port, strerror(-retval));
            return retval;
        }
    }
    /* Start the device. */
    retval = rte_eth_dev_start(port);
    if (retval < 0) {
        RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
            port, strerror(-retval));
        return retval;
    }

    if (promiscuous)
        rte_eth_promiscuous_enable(port);

    rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
    RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
    RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
        " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
        port,
        vmdq_ports_eth_addr[port].addr_bytes[0],
        vmdq_ports_eth_addr[port].addr_bytes[1],
        vmdq_ports_eth_addr[port].addr_bytes[2],
        vmdq_ports_eth_addr[port].addr_bytes[3],
        vmdq_ports_eth_addr[port].addr_bytes[4],
        vmdq_ports_eth_addr[port].addr_bytes[5]);

    return 0;
}
/*
 * Set socket file path.
 */
static int
us_vhost_parse_socket_path(const char *q_arg)
{
    /* Parse the socket path string. */
    if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
        return -1;

    socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
    if (socket_files == NULL)
        return -1;
    snprintf(socket_files + nb_sockets * PATH_MAX, PATH_MAX, "%s", q_arg);
    nb_sockets++;

    return 0;
}
/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
    char *end = NULL;
    unsigned long pm;

    errno = 0;

    /* parse hexadecimal string */
    pm = strtoul(portmask, &end, 16);
    if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
        return -1;

    return pm;
}
/*
 * Parse numeric options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
    char *end = NULL;
    unsigned long num;

    errno = 0;

    /* parse unsigned int string */
    num = strtoul(q_arg, &end, 10);
    if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
        return -1;

    if (num > max_valid_value)
        return -1;

    return num;
}
/*
 * Display usage.
 */
static void
us_vhost_usage(const char *prgname)
{
    RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
    "    --vm2vm [0|1|2]\n"
    "    --rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
    "    --socket-file <path>\n"
    "    -p PORTMASK: Set mask for ports to be used by application\n"
    "    --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
    "    --rx-retry [0|1]: disable/enable(default) retries on Rx. Enable retry if the destination queue is full\n"
    "    --rx-retry-delay [0-N]: timeout (in microseconds) between retries on Rx. This only takes effect if retries on Rx are enabled\n"
    "    --rx-retry-num [0-N]: the number of retries on Rx. This only takes effect if retries on Rx are enabled\n"
    "    --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
    "    --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
    "    --socket-file: The path of the socket file.\n"
    "    --tx-csum [0|1]: disable/enable TX checksum offload.\n"
    "    --tso [0|1]: disable/enable TCP segmentation offload (TSO).\n"
    "    --client: register a vhost-user socket as client mode.\n"
    "    --dequeue-zero-copy: enables dequeue zero copy\n",
           prgname);
}
/*
 * Parse the arguments given in the command line of the application.
 */
static int
us_vhost_parse_args(int argc, char **argv)
{
    int opt, ret;
    int option_index;
    unsigned i;
    const char *prgname = argv[0];
    static struct option long_option[] = {
        {"vm2vm", required_argument, NULL, 0},
        {"rx-retry", required_argument, NULL, 0},
        {"rx-retry-delay", required_argument, NULL, 0},
        {"rx-retry-num", required_argument, NULL, 0},
        {"mergeable", required_argument, NULL, 0},
        {"stats", required_argument, NULL, 0},
        {"socket-file", required_argument, NULL, 0},
        {"tx-csum", required_argument, NULL, 0},
        {"tso", required_argument, NULL, 0},
        {"client", no_argument, &client_mode, 1},
        {"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
        {"builtin-net-driver", no_argument, &builtin_net_driver, 1},
        {NULL, 0, 0, 0},
    };
    /* Parse command line */
    while ((opt = getopt_long(argc, argv, "p:P",
            long_option, &option_index)) != EOF) {
        switch (opt) {
        /* Portmask */
        case 'p':
            enabled_port_mask = parse_portmask(optarg);
            if (enabled_port_mask == 0) {
                RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
                us_vhost_usage(prgname);
                return -1;
            }
            break;

        case 'P':
            promiscuous = 1;
            vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
                ETH_VMDQ_ACCEPT_BROADCAST |
                ETH_VMDQ_ACCEPT_MULTICAST;
            break;

        case 0:
            /* Enable/disable vm2vm comms. */
            if (!strncmp(long_option[option_index].name, "vm2vm",
                    MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for "
                        "vm2vm [0|1|2]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                vm2vm_mode = (vm2vm_type)ret;
            }
            /* Enable/disable retries on RX. */
            if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                enable_retry = ret;
            }
            /* Enable/disable TX checksum offload. */
            if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                enable_tx_csum = ret;
            }
            /* Enable/disable TSO offload. */
            if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                enable_tso = ret;
            }
            /* Specify the delay time (in microseconds) between retries on RX. */
            if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, INT32_MAX);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                burst_rx_delay_time = ret;
            }
            /* Specify the number of retries on RX. */
            if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, INT32_MAX);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                burst_rx_retry_num = ret;
            }
            /* Enable/disable RX mergeable buffers. */
            if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                mergeable = !!ret;
                if (ret) {
                    vmdq_conf_default.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
                    vmdq_conf_default.rxmode.max_rx_pkt_len
                        = JUMBO_FRAME_MAX_SIZE;
                }
            }
            /* Enable/disable stats. */
            if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, INT32_MAX);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for stats [0..N]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                enable_stats = ret;
            }
            /* Set socket file path. */
            if (!strncmp(long_option[option_index].name,
                    "socket-file", MAX_LONG_OPT_SZ)) {
                if (us_vhost_parse_socket_path(optarg) == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for socket name (Max %d characters)\n",
                        PATH_MAX);
                    us_vhost_usage(prgname);
                    return -1;
                }
            }
            break;
        /* Invalid option - print options. */
        default:
            us_vhost_usage(prgname);
            return -1;
        }
    }
    for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
        if (enabled_port_mask & (1 << i))
            ports[num_ports++] = i;
    }

    if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
        RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
            "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
        return -1;
    }

    return 0;
}
/*
 * Update the global variable num_ports and the array ports according to
 * the number of system ports, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
    unsigned valid_num_ports = num_ports;
    unsigned portid;

    if (num_ports > nb_ports) {
        RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
            num_ports, nb_ports);
        num_ports = nb_ports;
    }

    for (portid = 0; portid < num_ports; portid++) {
        if (!rte_eth_dev_is_valid_port(ports[portid])) {
            RTE_LOG(INFO, VHOST_PORT,
                "\nSpecified port ID(%u) is not valid\n",
                ports[portid]);
            ports[portid] = INVALID_PORT_ID;
            valid_num_ports--;
        }
    }
    return valid_num_ports;
}
static __rte_always_inline struct vhost_dev *
find_vhost_dev(struct ether_addr *mac)
{
    struct vhost_dev *vdev;

    TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
        if (vdev->ready == DEVICE_RX &&
            is_same_ether_addr(mac, &vdev->mac_address))
            return vdev;
    }

    return NULL;
}
/*
 * This function learns the MAC address of the device and registers it,
 * along with a VLAN tag, with a VMDQ pool.
 */
static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
    struct ether_hdr *pkt_hdr;
    int i, ret;

    /* Learn the MAC address of the guest device from the packet. */
    pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    if (find_vhost_dev(&pkt_hdr->s_addr)) {
        RTE_LOG(ERR, VHOST_DATA,
            "(%d) device is using a registered MAC!\n",
            vdev->vid);
        return -1;
    }

    for (i = 0; i < ETHER_ADDR_LEN; i++)
        vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];

    /* vlan_tag currently uses the device_id. */
    vdev->vlan_tag = vlan_tags[vdev->vid];

    /* Print out VMDQ registration info. */
    RTE_LOG(INFO, VHOST_DATA,
        "(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
        vdev->vid,
        vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
        vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
        vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
        vdev->vlan_tag);

    /* Register the MAC address. */
    ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
        (uint32_t)vdev->vid + vmdq_pool_base);
    if (ret)
        RTE_LOG(ERR, VHOST_DATA,
            "(%d) failed to add device MAC address to VMDQ\n",
            vdev->vid);

    rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);

    /* Set the device as ready for RX. */
    vdev->ready = DEVICE_RX;

    return 0;
}
/*
 * Removes the MAC address and VLAN tag from VMDQ. Ensures that nothing
 * is adding buffers to the RX queue before disabling RX on the device.
 */
static inline void
unlink_vmdq(struct vhost_dev *vdev)
{
    unsigned i = 0;
    unsigned rx_count;
    struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

    if (vdev->ready == DEVICE_RX) {
        /* Clear MAC and VLAN settings. */
        rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
        for (i = 0; i < ETHER_ADDR_LEN; i++)
            vdev->mac_address.addr_bytes[i] = 0;

        vdev->vlan_tag = 0;

        /* Clear out the receive buffers. */
        rx_count = rte_eth_rx_burst(ports[0],
            (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

        while (rx_count) {
            for (i = 0; i < rx_count; i++)
                rte_pktmbuf_free(pkts_burst[i]);

            rx_count = rte_eth_rx_burst(ports[0],
                (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
        }

        vdev->ready = DEVICE_MAC_LEARNING;
    }
}
static __rte_always_inline void
virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
        struct rte_mbuf *m)
{
    uint16_t ret;

    if (builtin_net_driver) {
        ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
    } else {
        ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
    }

    if (enable_stats) {
        rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
        rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
        src_vdev->stats.tx_total++;
        src_vdev->stats.tx += ret;
    }
}
/*
 * Check if the packet destination MAC address is for a local device.
 * If so, put the packet on that device's RX queue. If not, return.
 */
static __rte_always_inline int
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
    struct ether_hdr *pkt_hdr;
    struct vhost_dev *dst_vdev;

    pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
    if (!dst_vdev)
        return -1;

    if (vdev->vid == dst_vdev->vid) {
        RTE_LOG_DP(DEBUG, VHOST_DATA,
            "(%d) TX: src and dst MAC are the same. Dropping packet.\n",
            vdev->vid);
        return 0;
    }

    RTE_LOG_DP(DEBUG, VHOST_DATA,
        "(%d) TX: MAC address is local\n", dst_vdev->vid);

    if (unlikely(dst_vdev->remove)) {
        RTE_LOG_DP(DEBUG, VHOST_DATA,
            "(%d) device is marked for removal\n", dst_vdev->vid);
        return 0;
    }

    virtio_xmit(dst_vdev, vdev, m);
    return 0;
}
/*
 * Check if the destination MAC of a packet belongs to a local VM; if it
 * does, get its VLAN tag and the required length offset.
 */
static __rte_always_inline int
find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
    uint32_t *offset, uint16_t *vlan_tag)
{
    struct vhost_dev *dst_vdev;
    struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
    if (!dst_vdev)
        return 0;

    if (vdev->vid == dst_vdev->vid) {
        RTE_LOG_DP(DEBUG, VHOST_DATA,
            "(%d) TX: src and dst MAC are the same. Dropping packet.\n",
            vdev->vid);
        return -1;
    }

    /*
     * HW VLAN strip will reduce the packet length by the length of the
     * VLAN tag, so the packet length needs to be restored by adding the
     * tag length back.
     */
    *offset = VLAN_HLEN;
    *vlan_tag = vlan_tags[vdev->vid];

    RTE_LOG_DP(DEBUG, VHOST_DATA,
        "(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
        vdev->vid, dst_vdev->vid, *vlan_tag);

    return 0;
}
static uint16_t
get_psd_sum(void *l3_hdr, uint64_t ol_flags)
{
    if (ol_flags & PKT_TX_IPV4)
        return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
    else /* assume ethertype == ETHER_TYPE_IPv6 */
        return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}
static void virtio_tx_offload(struct rte_mbuf *m)
{
    void *l3_hdr;
    struct ipv4_hdr *ipv4_hdr = NULL;
    struct tcp_hdr *tcp_hdr = NULL;
    struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    l3_hdr = (char *)eth_hdr + m->l2_len;

    if (m->ol_flags & PKT_TX_IPV4) {
        ipv4_hdr = l3_hdr;
        ipv4_hdr->hdr_checksum = 0;
        m->ol_flags |= PKT_TX_IP_CKSUM;
    }

    tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
    tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}
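/*
 * A hypothetical caller-side sketch: for virtio_tx_offload() to work, a
 * TSO-marked mbuf is expected to arrive tagged roughly as
 *
 *     m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4;
 *     m->l2_len = sizeof(struct ether_hdr);
 *     m->l3_len = sizeof(struct ipv4_hdr);
 *     m->tso_segsz = mss;            (MSS announced by the guest)
 *
 * so that the l2_len/l3_len offsets above locate the IP and TCP headers.
 * In this application those fields are filled by the vhost dequeue path.
 */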
static void
free_pkts(struct rte_mbuf **pkts, uint16_t n)
{
    while (n--)
        rte_pktmbuf_free(pkts[n]);
}
static __rte_always_inline void
do_drain_mbuf_table(struct mbuf_table *tx_q)
{
    uint16_t count;

    count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
                 tx_q->m_table, tx_q->len);
    if (unlikely(count < tx_q->len))
        free_pkts(&tx_q->m_table[count], tx_q->len - count);

    tx_q->len = 0;
}
/*
 * This function routes the TX packet to the correct interface. This
 * may be a local device or the physical port.
 */
static __rte_always_inline void
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
    struct mbuf_table *tx_q;
    unsigned offset = 0;
    const uint16_t lcore_id = rte_lcore_id();
    struct ether_hdr *nh;

    nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
    if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
        struct vhost_dev *vdev2;

        TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
            if (vdev2 != vdev)
                virtio_xmit(vdev2, vdev, m);
        }
        goto queue2nic;
    }

    /* Check if the destination is a local VM. */
    if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
        rte_pktmbuf_free(m);
        return;
    }

    if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
        if (unlikely(find_local_dest(vdev, m, &offset,
                         &vlan_tag) != 0)) {
            rte_pktmbuf_free(m);
            return;
        }
    }

    RTE_LOG_DP(DEBUG, VHOST_DATA,
        "(%d) TX: MAC address is external\n", vdev->vid);

queue2nic:

    /* Add the packet to the port's Tx queue. */
    tx_q = &lcore_tx_queue[lcore_id];

    nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
    if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
        /* The guest has inserted the VLAN tag. */
        struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
        uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
        if ((vm2vm_mode == VM2VM_HARDWARE) &&
            (vh->vlan_tci != vlan_tag_be))
            vh->vlan_tci = vlan_tag_be;
    } else {
        m->ol_flags |= PKT_TX_VLAN_PKT;

        /*
         * Find the right segment to adjust the data length when the
         * offset is bigger than the tailroom size.
         */
        if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
            if (likely(offset <= rte_pktmbuf_tailroom(m)))
                m->data_len += offset;
            else {
                struct rte_mbuf *seg = m;

                while ((seg->next != NULL) &&
                       (offset > rte_pktmbuf_tailroom(seg)))
                    seg = seg->next;

                seg->data_len += offset;
            }
            m->pkt_len += offset;
        }

        m->vlan_tci = vlan_tag;
    }

    if (m->ol_flags & PKT_TX_TCP_SEG)
        virtio_tx_offload(m);

    tx_q->m_table[tx_q->len++] = m;
    if (enable_stats) {
        vdev->stats.tx_total++;
        vdev->stats.tx++;
    }

    if (unlikely(tx_q->len == MAX_PKT_BURST))
        do_drain_mbuf_table(tx_q);
}
static __rte_always_inline void
drain_mbuf_table(struct mbuf_table *tx_q)
{
    static uint64_t prev_tsc;
    uint64_t cur_tsc;

    if (tx_q->len == 0)
        return;

    cur_tsc = rte_rdtsc();
    if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
        prev_tsc = cur_tsc;

        RTE_LOG_DP(DEBUG, VHOST_DATA,
            "TX queue drained after timeout with burst size %u\n",
            tx_q->len);
        do_drain_mbuf_table(tx_q);
    }
}
static __rte_always_inline void
drain_eth_rx(struct vhost_dev *vdev)
{
    uint16_t rx_count, enqueue_count;
    struct rte_mbuf *pkts[MAX_PKT_BURST];

    rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
                    pkts, MAX_PKT_BURST);
    if (!rx_count)
        return;

    /*
     * When "enable_retry" is set, we wait and retry when there are
     * not enough free slots in the queue to hold @rx_count packets,
     * to reduce packet loss.
     */
    if (enable_retry &&
        unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
            VIRTIO_RXQ))) {
        uint32_t retry;

        for (retry = 0; retry < burst_rx_retry_num; retry++) {
            rte_delay_us(burst_rx_delay_time);
            if (rx_count <= rte_vhost_avail_entries(vdev->vid,
                    VIRTIO_RXQ))
                break;
        }
    }

    if (builtin_net_driver) {
        enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
                        pkts, rx_count);
    } else {
        enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
                        pkts, rx_count);
    }
    if (enable_stats) {
        rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
        rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
    }

    free_pkts(pkts, rx_count);
}
static __rte_always_inline void
drain_virtio_tx(struct vhost_dev *vdev)
{
    struct rte_mbuf *pkts[MAX_PKT_BURST];
    uint16_t count;
    uint16_t i;

    if (builtin_net_driver) {
        count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
                    pkts, MAX_PKT_BURST);
    } else {
        count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
                    mbuf_pool, pkts, MAX_PKT_BURST);
    }

    /* Setup VMDq for the first packet. */
    if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
        if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
            free_pkts(pkts, count);
    }

    for (i = 0; i < count; ++i)
        virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}
/*
 * Main function of vhost-switch. It basically does:
 *
 * for each vhost device {
 *    - drain_eth_rx()
 *
 *      Which drains the host eth Rx queue linked to the vhost device,
 *      and delivers all of the packets to the guest virtio Rx ring
 *      associated with this vhost device.
 *
 *    - drain_virtio_tx()
 *
 *      Which drains the guest virtio Tx queue and delivers all of the
 *      packets to the target, which could be another vhost device, or
 *      the physical eth dev. The routing is done in the function
 *      "virtio_tx_route".
 * }
 */
static int
switch_worker(void *arg __rte_unused)
{
    unsigned i;
    unsigned lcore_id = rte_lcore_id();
    struct vhost_dev *vdev;
    struct mbuf_table *tx_q;

    RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);

    tx_q = &lcore_tx_queue[lcore_id];
    for (i = 0; i < rte_lcore_count(); i++) {
        if (lcore_ids[i] == lcore_id) {
            tx_q->txq_id = i;
            break;
        }
    }

    while (1) {
        drain_mbuf_table(tx_q);

        /*
         * Inform the configuration core that we have exited the
         * linked list and that no devices are in use if requested.
         */
        if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
            lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;

        /*
         * Process vhost devices.
         */
        TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
                  lcore_vdev_entry) {
            if (unlikely(vdev->remove)) {
                unlink_vmdq(vdev);
                vdev->ready = DEVICE_SAFE_REMOVE;
                continue;
            }

            if (likely(vdev->ready == DEVICE_RX))
                drain_eth_rx(vdev);

            if (likely(!vdev->remove))
                drain_virtio_tx(vdev);
        }
    }

    return 0;
}
/*
 * Remove a device from the specific data core linked list and from the
 * main linked list. Synchronization occurs through the use of the
 * lcore dev_removal_flag. The device is made volatile here to avoid
 * re-ordering of dev->remove = 1, which can cause an infinite loop in
 * the rte_pause loop.
 */
static void
destroy_device(int vid)
{
    struct vhost_dev *vdev = NULL;
    int lcore;

    TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
        if (vdev->vid == vid)
            break;
    }
    if (!vdev)
        return;
    /* Set the remove flag. */
    vdev->remove = 1;
    while (vdev->ready != DEVICE_SAFE_REMOVE)
        rte_pause();

    if (builtin_net_driver)
        vs_vhost_net_remove(vdev);

    TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
             lcore_vdev_entry);
    TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);

    /* Set the dev_removal_flag on each lcore. */
    RTE_LCORE_FOREACH_SLAVE(lcore)
        lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;

    /*
     * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
     * we can be sure that they can no longer access the device removed
     * from the linked lists and that the devices are no longer in use.
     */
    RTE_LCORE_FOREACH_SLAVE(lcore) {
        while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
            rte_pause();
    }

    lcore_info[vdev->coreid].device_num--;

    RTE_LOG(INFO, VHOST_DATA,
        "(%d) device has been removed from data core\n",
        vdev->vid);

    rte_free(vdev);
}
/*
 * A new device is added to a data core. First the device is added to
 * the main linked list and then allocated to a specific data core.
 */
static int
new_device(int vid)
{
    int lcore, core_add = 0;
    uint32_t device_num_min = num_devices;
    struct vhost_dev *vdev;

    vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
    if (vdev == NULL) {
        RTE_LOG(INFO, VHOST_DATA,
            "(%d) couldn't allocate memory for vhost dev\n",
            vid);
        return -1;
    }
    vdev->vid = vid;

    if (builtin_net_driver)
        vs_vhost_net_setup(vdev);

    TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
    vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;

    /* Reset the ready flag. */
    vdev->ready = DEVICE_MAC_LEARNING;
    vdev->remove = 0;

    /* Find a suitable lcore to add the device. */
    RTE_LCORE_FOREACH_SLAVE(lcore) {
        if (lcore_info[lcore].device_num < device_num_min) {
            device_num_min = lcore_info[lcore].device_num;
            core_add = lcore;
        }
    }
    vdev->coreid = core_add;

    TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
              lcore_vdev_entry);
    lcore_info[vdev->coreid].device_num++;

    /* Disable notifications. */
    rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
    rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);

    RTE_LOG(INFO, VHOST_DATA,
        "(%d) device has been added to data core %d\n",
        vid, vdev->coreid);

    return 0;
}
/*
 * These callbacks allow devices to be added to the data core when
 * configuration has fully completed.
 */
static const struct vhost_device_ops virtio_net_device_ops =
{
    .new_device =  new_device,
    .destroy_device = destroy_device,
};
/*
 * This is a thread that wakes up periodically to print statistics, if
 * the user has enabled them.
 */
static void *
print_stats(__rte_unused void *arg)
{
    struct vhost_dev *vdev;
    uint64_t tx_dropped, rx_dropped;
    uint64_t tx, tx_total, rx, rx_total;
    const char clr[] = { 27, '[', '2', 'J', '\0' };
    const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

    while (1) {
        sleep(enable_stats);

        /* Clear screen and move to top left */
        printf("%s%s\n", clr, top_left);
        printf("Device statistics =================================\n");

        TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
            tx_total = vdev->stats.tx_total;
            tx = vdev->stats.tx;
            tx_dropped = tx_total - tx;

            rx_total = rte_atomic64_read(&vdev->stats.rx_total_atomic);
            rx = rte_atomic64_read(&vdev->stats.rx_atomic);
            rx_dropped = rx_total - rx;

            printf("Statistics for device %d\n"
                "-----------------------\n"
                "TX total:       %" PRIu64 "\n"
                "TX dropped:     %" PRIu64 "\n"
                "TX successful:  %" PRIu64 "\n"
                "RX total:       %" PRIu64 "\n"
                "RX dropped:     %" PRIu64 "\n"
                "RX successful:  %" PRIu64 "\n",
                vdev->vid,
                tx_total, tx_dropped, tx,
                rx_total, rx_dropped, rx);
        }

        printf("===================================================\n");
    }

    return NULL;
}
static void
unregister_drivers(int socket_num)
{
    int i, ret;

    for (i = 0; i < socket_num; i++) {
        ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
        if (ret != 0)
            RTE_LOG(ERR, VHOST_CONFIG,
                "Failed to unregister vhost driver for %s.\n",
                socket_files + i * PATH_MAX);
    }
}
/* When we receive an INT signal (SIGINT), unregister the vhost driver. */
static void
sigint_handler(__rte_unused int signum)
{
    /* Unregister vhost driver. */
    unregister_drivers(nb_sockets);

    exit(0);
}
/*
 * While creating an mbuf pool, one key thing is to figure out how
 * many mbuf entries are enough for our use. FYI, here are some
 * guidelines:
 *
 * - Each Rx queue would reserve @nr_rx_desc mbufs at queue setup stage.
 *
 * - For each switch core (a CPU core that does the packet switching),
 *   we also need to make some reservation for receiving the packets
 *   from the virtio Tx queue. How many is enough depends on the usage.
 *   It's normally a simple calculation like the following:
 *
 *       MAX_PKT_BURST * max packet size / mbuf size
 *
 *   So we definitely need to allocate more mbufs when TSO is enabled.
 *
 * - Similarly, for each switching core, we should reserve @nr_rx_desc
 *   mbufs for receiving the packets from the physical NIC device.
 *
 * - We also need to make sure, for each switch core, that we have
 *   allocated enough mbufs to fill up the mbuf cache.
 */
static void
create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
    uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
{
    uint32_t nr_mbufs;
    uint32_t nr_mbufs_per_core;
    uint32_t mtu = 1500;

    nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST /
            (mbuf_size - RTE_PKTMBUF_HEADROOM);
    nr_mbufs_per_core += nr_rx_desc;
    nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);

    nr_mbufs = nr_queues * nr_rx_desc;
    nr_mbufs += nr_mbufs_per_core * nr_switch_core;
    nr_mbufs *= nr_port;
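    /*
     * A worked example under this file's defaults, assuming
     * MAX_PKT_BURST is 32 (defined in main.h): mbuf_size = 2176
     * (2048 B data room + 128 B headroom) and mtu = 1500 give
     * nr_mbufs_per_core = 3676 * 32 / 2048 ~= 57, plus nr_rx_desc
     * (1024), i.e. roughly 1081 mbufs per switching core.
     */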
    mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
                        nr_mbuf_cache, 0, mbuf_size,
                        rte_socket_id());
    if (mbuf_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
}
/*
 * Main function, does initialisation and calls the per-lcore functions.
 */
int
main(int argc, char *argv[])
{
    unsigned lcore_id, core_id = 0;
    unsigned nb_ports, valid_num_ports;
    int ret, i;
    uint16_t portid;
    static pthread_t tid;
    uint64_t flags = 0;

    signal(SIGINT, sigint_handler);

    /* init EAL */
    ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
    argc -= ret;
    argv += ret;

    /* parse app arguments */
    ret = us_vhost_parse_args(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid argument\n");

    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
        TAILQ_INIT(&lcore_info[lcore_id].vdev_list);

        if (rte_lcore_is_enabled(lcore_id))
            lcore_ids[core_id++] = lcore_id;
    }

    if (rte_lcore_count() > RTE_MAX_LCORE)
        rte_exit(EXIT_FAILURE, "Not enough cores\n");
    /* Get the number of physical ports. */
    nb_ports = rte_eth_dev_count_avail();

    /*
     * Update the global variable num_ports and the global array ports,
     * and get the value of valid_num_ports according to the number of
     * system ports.
     */
    valid_num_ports = check_ports_num(nb_ports);

    if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
        RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
            "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
        return -1;
    }

    /*
     * FIXME: here we are trying to allocate mbufs big enough for
     * @MAX_QUEUES, but the truth is we're never going to use that
     * many queues here. We probably should only do allocation for
     * those queues we are going to use.
     */
    create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
        MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
    if (vm2vm_mode == VM2VM_HARDWARE) {
        /* Enable VT loopback to let the L2 switch do it. */
        vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
        RTE_LOG(DEBUG, VHOST_CONFIG,
            "Enable loop back for L2 switch in vmdq.\n");
    }

    /* initialize all ports */
    RTE_ETH_FOREACH_DEV(portid) {
        /* skip ports that are not enabled */
        if ((enabled_port_mask & (1 << portid)) == 0) {
            RTE_LOG(INFO, VHOST_PORT,
                "Skipping disabled port %d\n", portid);
            continue;
        }
        if (port_init(portid) != 0)
            rte_exit(EXIT_FAILURE,
                "Cannot initialize network ports\n");
    }

    /* Enable stats if the user option is set. */
    if (enable_stats) {
        ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
                    print_stats, NULL);
        if (ret < 0)
            rte_exit(EXIT_FAILURE,
                "Cannot create print-stats thread\n");
    }
    /* Launch all data cores. */
    RTE_LCORE_FOREACH_SLAVE(lcore_id)
        rte_eal_remote_launch(switch_worker, NULL, lcore_id);

    if (client_mode)
        flags |= RTE_VHOST_USER_CLIENT;

    if (dequeue_zero_copy)
        flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
    /* Register vhost user driver to handle vhost messages. */
    for (i = 0; i < nb_sockets; i++) {
        char *file = socket_files + i * PATH_MAX;
        ret = rte_vhost_driver_register(file, flags);
        if (ret != 0) {
            unregister_drivers(i);
            rte_exit(EXIT_FAILURE,
                "vhost driver register failure.\n");
        }

        if (builtin_net_driver)
            rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);

        if (mergeable == 0) {
            rte_vhost_driver_disable_features(file,
                1ULL << VIRTIO_NET_F_MRG_RXBUF);
        }

        if (enable_tx_csum == 0) {
            rte_vhost_driver_disable_features(file,
                1ULL << VIRTIO_NET_F_CSUM);
        }

        if (enable_tso == 0) {
            rte_vhost_driver_disable_features(file,
                1ULL << VIRTIO_NET_F_HOST_TSO4);
            rte_vhost_driver_disable_features(file,
                1ULL << VIRTIO_NET_F_HOST_TSO6);
            rte_vhost_driver_disable_features(file,
                1ULL << VIRTIO_NET_F_GUEST_TSO4);
            rte_vhost_driver_disable_features(file,
                1ULL << VIRTIO_NET_F_GUEST_TSO6);
        }

        if (promiscuous) {
            rte_vhost_driver_enable_features(file,
                1ULL << VIRTIO_NET_F_CTRL_RX);
        }

        ret = rte_vhost_driver_callback_register(file,
            &virtio_net_device_ops);
        if (ret != 0) {
            rte_exit(EXIT_FAILURE,
                "failed to register vhost driver callbacks.\n");
        }

        if (rte_vhost_driver_start(file) < 0) {
            rte_exit(EXIT_FAILURE,
                "failed to start vhost driver.\n");
        }
    }

    RTE_LCORE_FOREACH_SLAVE(lcore_id)
        rte_eal_wait_lcore(lcore_id);

    return 0;
}