/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <arpa/inet.h>
#include <getopt.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <signal.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/param.h>
#include <unistd.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_virtio_net.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#include "main.h"
#define MAX_QUEUES 128

/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

#define MBUF_CACHE_SIZE	128
#define MBUF_DATA_SIZE	RTE_MBUF_DEFAULT_BUF_SIZE

#define MAX_PKT_BURST 32		/* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */

#define BURST_RX_WAIT_US 15	/* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4		/* Number of retries on RX. */
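/*
 * With the defaults above, a full virtio RX queue stalls a burst for at
 * most BURST_RX_RETRIES * BURST_RX_WAIT_US = 4 * 15 = ~60us before the
 * packets that still do not fit are dropped (see drain_eth_rx() below).
 */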
#define JUMBO_FRAME_MAX_SIZE    0x2600

/* State of virtio device. */
#define DEVICE_MAC_LEARNING	0
#define DEVICE_RX		1
#define DEVICE_SAFE_REMOVE	2

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* Max number of devices. Limited by vmdq. */
#define MAX_DEVICES 64

/* Size of buffers used for snprintfs. */
#define MAX_PRINT_BUFF 6072

/* Maximum character device basename size. */
#define MAX_BASENAME_SZ 10

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Promiscuous mode */
static uint32_t promiscuous;

/* number of devices/queues to support */
static uint32_t num_queues = 0;
static uint32_t num_devices;

static struct rte_mempool *mbuf_pool;
static int mergeable;

/* Do vlan strip on host, enabled by default */
static uint32_t vlan_strip = 1;

/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
typedef enum {
	VM2VM_DISABLED = 0,
	VM2VM_SOFTWARE = 1,
	VM2VM_HARDWARE = 2,
	VM2VM_LAST
} vm2vm_type;
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;

/* Enable stats. */
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;

/* Disable TX checksum offload */
static uint32_t enable_tx_csum;

/* Disable TSO offload */
static uint32_t enable_tso;

static int client_mode;

/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;

/* Character device basename. Can be set by user. */
static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";
/* empty vmdq configuration structure. Filled in programmatically */
static struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		/*
		 * Required for 1G NICs such as the I350; fixes a bug where
		 * IPv4 forwarding in the guest could not forward packets
		 * from one virtio device to another.
		 */
		.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
	},

	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
	.rx_adv_conf = {
		/*
		 * should be overridden separately in code with
		 * appropriate values
		 */
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
};
static unsigned lcore_ids[RTE_MAX_LCORE];
static uint8_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified in command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;

const uint16_t vlan_tags[] = {
	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
	1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};

/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

static struct vhost_dev_tailq_list vhost_dev_list =
	TAILQ_HEAD_INITIALIZER(vhost_dev_list);

static struct lcore_info lcore_info[RTE_MAX_LCORE];

/* Used for queueing bursts of TX packets. */
struct mbuf_table {
	unsigned len;
	unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];

#define MBUF_TABLE_DRAIN_TSC	((rte_get_tsc_hz() + US_PER_S - 1) \
				 / US_PER_S * BURST_TX_DRAIN_US)
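/*
 * Worked example (illustrative clock rate): with a 2GHz TSC,
 * MBUF_TABLE_DRAIN_TSC = ceil(2e9 / US_PER_S) * 100 = 2000 * 100
 * = 200000 cycles, i.e. queued TX packets sit for at most ~100us
 * before being flushed to the NIC.
 */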
/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
	struct rte_eth_vmdq_rx_conf conf;
	struct rte_eth_vmdq_rx_conf *def_conf =
		&vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
	unsigned i;

	memset(&conf, 0, sizeof(conf));
	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
	conf.nb_pool_maps = num_devices;
	conf.enable_loop_back = def_conf->enable_loop_back;
	conf.rx_mode = def_conf->rx_mode;

	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << i);
	}

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
	return 0;
}
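/*
 * Example: with num_devices == 8, the map built above is
 * { {1000, 0x01}, {1001, 0x02}, ..., {1007, 0x80} }, i.e. each VLAN tag
 * steers traffic into exactly one VMDQ pool, one pool per vhost device.
 */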
/*
 * Validate the device number according to the max pool number obtained from
 * dev_info. If the device number is invalid, print an error message and
 * return -1. Each device must have its own pool.
 */
static inline int
validate_num_devices(uint32_t max_nb_devices)
{
	if (num_devices > max_nb_devices) {
		RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
		return -1;
	}
	return 0;
}

/*
 * Initialises a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_txconf *txconf;
	int16_t rx_rings, tx_rings;
	uint16_t rx_ring_size, tx_ring_size;
	int retval;
	uint16_t q;

	/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
	rte_eth_dev_info_get(port, &dev_info);

	if (dev_info.max_rx_queues > MAX_QUEUES) {
		rte_exit(EXIT_FAILURE,
			"please define MAX_QUEUES no less than %u in %s\n",
			dev_info.max_rx_queues, __FILE__);
	}

	rxconf = &dev_info.default_rxconf;
	txconf = &dev_info.default_txconf;
	rxconf->rx_drop_en = 1;

	/* Enable vlan offload */
	txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;

	/* Configure the number of supported virtio devices based on VMDQ limits */
	num_devices = dev_info.max_vmdq_pools;

	rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
	tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
	tx_rings = (uint16_t)rte_lcore_count();

	retval = validate_num_devices(MAX_DEVICES);
	if (retval < 0)
		return retval;

	/* Get port configuration. */
	retval = get_eth_conf(&port_conf, num_devices);
	if (retval < 0)
		return retval;

	/* NIC queues are divided into pf queues and vmdq queues. */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_devices * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base = dev_info.vmdq_pool_base;
	printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
		num_pf_queues, num_devices, queues_per_pool);

	if (port >= rte_eth_dev_count())
		return -1;

	if (enable_tx_csum == 0)
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_CSUM);

	if (enable_tso == 0) {
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4);
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO6);
	}

	rx_rings = (uint16_t)dev_info.max_rx_queues;
	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Setup the queues. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
						rte_eth_dev_socket_id(port),
						rxconf,
						mbuf_pool);
		if (retval < 0)
			return retval;
	}
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
						rte_eth_dev_socket_id(port),
						txconf);
		if (retval < 0)
			return retval;
	}

	/* Start the device. */
	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		RTE_LOG(ERR, VHOST_DATA, "Failed to start the device.\n");
		return retval;
	}

	if (promiscuous)
		rte_eth_promiscuous_enable(port);

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
		" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
		(unsigned)port,
		vmdq_ports_eth_addr[port].addr_bytes[0],
		vmdq_ports_eth_addr[port].addr_bytes[1],
		vmdq_ports_eth_addr[port].addr_bytes[2],
		vmdq_ports_eth_addr[port].addr_bytes[3],
		vmdq_ports_eth_addr[port].addr_bytes[4],
		vmdq_ports_eth_addr[port].addr_bytes[5]);

	return 0;
}
/*
 * Set character device basename.
 */
static int
us_vhost_parse_basename(const char *q_arg)
{
	/* parse the basename string; reject names that do not fit */
	if (strnlen(q_arg, MAX_BASENAME_SZ) >= MAX_BASENAME_SZ)
		return -1;
	snprintf(dev_basename, MAX_BASENAME_SZ, "%s", q_arg);
	return 0;
}

/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	errno = 0;
	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;
	if (pm == 0)
		return -1;
	return pm;
}

/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
	char *end = NULL;
	unsigned long num;

	errno = 0;
	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;
	if (num > max_valid_value)
		return -1;
	return num;
}
/*
 * Display usage.
 */
static void
us_vhost_usage(const char *prgname)
{
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
	"		--vm2vm [0|1|2]\n"
	"		--rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
	"		--dev-basename <name>\n"
	"		-p PORTMASK: Set mask for ports to be used by application\n"
	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
	"		--rx-retry [0|1]: disable/enable(default) retries on RX. Retry if the destination queue is full\n"
	"		--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. Only takes effect if RX retries are enabled\n"
	"		--rx-retry-num [0-N]: the number of retries on RX. Only takes effect if RX retries are enabled\n"
	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
	"		--vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n"
	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
	"		--dev-basename: The basename to be used for the character device\n"
	"		--tx-csum [0|1]: disable/enable TX checksum offload\n"
	"		--tso [0|1]: disable/enable TCP segmentation offload\n"
	"		--client: register a vhost-user socket as client mode\n",
	       prgname);
}
/*
 * Parse the arguments given in the command line of the application.
 */
static int
us_vhost_parse_args(int argc, char **argv)
{
	int opt, ret;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"vm2vm", required_argument, NULL, 0},
		{"rx-retry", required_argument, NULL, 0},
		{"rx-retry-delay", required_argument, NULL, 0},
		{"rx-retry-num", required_argument, NULL, 0},
		{"mergeable", required_argument, NULL, 0},
		{"vlan-strip", required_argument, NULL, 0},
		{"stats", required_argument, NULL, 0},
		{"dev-basename", required_argument, NULL, 0},
		{"tx-csum", required_argument, NULL, 0},
		{"tso", required_argument, NULL, 0},
		{"client", no_argument, &client_mode, 1},
		{NULL, 0, 0, 0},
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:P",
			long_option, &option_index)) != EOF) {
		switch (opt) {
		/* Portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
				us_vhost_usage(prgname);
				return -1;
			}
			break;

		case 'P':
			promiscuous = 1;
			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
				ETH_VMDQ_ACCEPT_BROADCAST |
				ETH_VMDQ_ACCEPT_MULTICAST;
			rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX);
			break;

		case 0:
			/* Enable/disable vm2vm comms. */
			if (!strncmp(long_option[option_index].name, "vm2vm",
				MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for "
						"vm2vm [0|1|2]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				vm2vm_mode = (vm2vm_type)ret;
			}

			/* Enable/disable retries on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				enable_retry = ret;
			}

			/* Enable/disable TX checksum offload. */
			if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				enable_tx_csum = ret;
			}

			/* Enable/disable TSO offload. */
			if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				enable_tso = ret;
			}

			/* Specify the retry delay time (in microseconds) on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				burst_rx_delay_time = ret;
			}

			/* Specify the number of retries on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				burst_rx_retry_num = ret;
			}

			/* Enable/disable RX mergeable buffers. */
			if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				mergeable = !!ret;
				if (ret) {
					vmdq_conf_default.rxmode.jumbo_frame = 1;
					vmdq_conf_default.rxmode.max_rx_pkt_len
						= JUMBO_FRAME_MAX_SIZE;
				}
			}

			/* Enable/disable RX VLAN strip on host. */
			if (!strncmp(long_option[option_index].name,
					"vlan-strip", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for VLAN strip [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				vlan_strip = !!ret;
				vmdq_conf_default.rxmode.hw_vlan_strip =
					vlan_strip;
			}

			/* Enable/disable stats. */
			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				enable_stats = ret;
			}

			/* Set character device basename. */
			if (!strncmp(long_option[option_index].name, "dev-basename", MAX_LONG_OPT_SZ)) {
				if (us_vhost_parse_basename(optarg) == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for character device basename (Max %d characters)\n", MAX_BASENAME_SZ);
					us_vhost_usage(prgname);
					return -1;
				}
			}
			break;

		/* Invalid option - print options. */
		default:
			us_vhost_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint8_t)i;
	}

	if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	return 0;
}
/*
 * Update the global var NUM_PORTS and array PORTS according to the number of
 * system ports, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (ports[portid] >= nb_ports) {
			RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
				ports[portid], (nb_ports - 1));
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}
static inline struct vhost_dev *__attribute__((always_inline))
find_vhost_dev(struct ether_addr *mac)
{
	struct vhost_dev *vdev;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->ready == DEVICE_RX &&
		    is_same_ether_addr(mac, &vdev->mac_address))
			return vdev;
	}
	return NULL;
}
/*
 * This function learns the MAC address of the device and registers this along with a
 * vlan tag to a VMDQ.
 */
static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct ether_hdr *pkt_hdr;
	int i, ret;

	/* Learn MAC address of guest device from packet */
	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	if (find_vhost_dev(&pkt_hdr->s_addr)) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) device is using a registered MAC!\n",
			vdev->vid);
		return -1;
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];

	/* vlan_tag currently uses the device_id. */
	vdev->vlan_tag = vlan_tags[vdev->vid];

	/* Print out VMDQ registration info. */
	RTE_LOG(INFO, VHOST_DATA,
		"(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
		vdev->vid,
		vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
		vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
		vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
		vdev->vlan_tag);

	/* Register the MAC address. */
	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
				(uint32_t)vdev->vid + vmdq_pool_base);
	if (ret)
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) failed to add device MAC address to VMDQ\n",
			vdev->vid);

	/* Enable stripping of the vlan tag as we handle routing. */
	if (vlan_strip)
		rte_eth_dev_set_vlan_strip_on_queue(ports[0],
			(uint16_t)vdev->vmdq_rx_q, 1);

	/* Set device as ready for RX. */
	vdev->ready = DEVICE_RX;
	return 0;
}
/*
 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
 * queue before disabling RX on the device.
 */
static inline void
unlink_vmdq(struct vhost_dev *vdev)
{
	unsigned i = 0;
	unsigned rx_count;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	if (vdev->ready == DEVICE_RX) {
		/* Clear MAC and VLAN settings */
		rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
		for (i = 0; i < 6; i++)
			vdev->mac_address.addr_bytes[i] = 0;
		vdev->vlan_tag = 0;

		/* Clear out the receive buffers */
		rx_count = rte_eth_rx_burst(ports[0],
			(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
		while (rx_count) {
			for (i = 0; i < rx_count; i++)
				rte_pktmbuf_free(pkts_burst[i]);
			rx_count = rte_eth_rx_burst(ports[0],
				(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
		}

		vdev->ready = DEVICE_MAC_LEARNING;
	}
}
static inline void __attribute__((always_inline))
virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
	    struct rte_mbuf *m)
{
	uint16_t ret;

	ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
	if (enable_stats) {
		rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
		rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
		src_vdev->stats.tx_total++;
		src_vdev->stats.tx += ret;
	}
}
/*
 * Check if the packet destination MAC address is for a local device. If so then put
 * the packet on that device's RX queue. If not then return.
 */
static inline int __attribute__((always_inline))
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct ether_hdr *pkt_hdr;
	struct vhost_dev *dst_vdev;

	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
	if (!dst_vdev)
		return -1;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
			vdev->vid);
		return 0;
	}

	RTE_LOG(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is local\n", dst_vdev->vid);

	if (unlikely(dst_vdev->remove)) {
		RTE_LOG(DEBUG, VHOST_DATA,
			"(%d) device is marked for removal\n", dst_vdev->vid);
		return 0;
	}

	virtio_xmit(dst_vdev, vdev, m);
	return 0;
}
/*
 * Check if the destination MAC of a packet is one local VM,
 * and get its vlan tag, and offset if it is.
 */
static inline int __attribute__((always_inline))
find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
	uint32_t *offset, uint16_t *vlan_tag)
{
	struct vhost_dev *dst_vdev;
	struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
	if (!dst_vdev)
		return 0;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
			vdev->vid);
		return -1;
	}

	/*
	 * HW VLAN strip reduces the packet length by the size of the VLAN
	 * tag, so the packet length needs to be restored by adding it back.
	 */
	*offset  = VLAN_HLEN;
	*vlan_tag = vlan_tags[vdev->vid];

	RTE_LOG(DEBUG, VHOST_DATA,
		"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
		vdev->vid, dst_vdev->vid, *vlan_tag);
	return 0;
}
static uint16_t
get_psd_sum(void *l3_hdr, uint64_t ol_flags)
{
	if (ol_flags & PKT_TX_IPV4)
		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}

static void virtio_tx_offload(struct rte_mbuf *m)
{
	void *l3_hdr;
	struct ipv4_hdr *ipv4_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	l3_hdr = (char *)eth_hdr + m->l2_len;

	if (m->ol_flags & PKT_TX_IPV4) {
		ipv4_hdr = l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		m->ol_flags |= PKT_TX_IP_CKSUM;
	}

	tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
	tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}

static inline void
free_pkts(struct rte_mbuf **pkts, uint16_t n)
{
	while (n--)
		rte_pktmbuf_free(pkts[n]);
}

static inline void __attribute__((always_inline))
do_drain_mbuf_table(struct mbuf_table *tx_q)
{
	uint16_t count;

	count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
				 tx_q->m_table, tx_q->len);
	if (unlikely(count < tx_q->len))
		free_pkts(&tx_q->m_table[count], tx_q->len - count);
	tx_q->len = 0;
}
/*
 * This function routes the TX packet to the correct interface. This
 * may be a local device or the physical port.
 */
static inline void __attribute__((always_inline))
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
	struct mbuf_table *tx_q;
	unsigned offset = 0;
	const uint16_t lcore_id = rte_lcore_id();
	struct ether_hdr *nh;

	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
		struct vhost_dev *vdev2;

		TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
			virtio_xmit(vdev2, vdev, m);
		}
		goto queue2nic;
	}

	/* Check if destination is a local VM */
	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
		rte_pktmbuf_free(m);
		return;
	}

	if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
		if (unlikely(find_local_dest(vdev, m, &offset,
					     &vlan_tag) != 0)) {
			rte_pktmbuf_free(m);
			return;
		}
	}

	RTE_LOG(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is external\n", vdev->vid);

queue2nic:

	/* Add packet to the port TX queue */
	tx_q = &lcore_tx_queue[lcore_id];

	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
		/* Guest has inserted the vlan tag. */
		struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
		uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
		if ((vm2vm_mode == VM2VM_HARDWARE) &&
			(vh->vlan_tci != vlan_tag_be))
			vh->vlan_tci = vlan_tag_be;
	} else {
		m->ol_flags |= PKT_TX_VLAN_PKT;

		/*
		 * Find the right seg to adjust the data len when offset is
		 * bigger than tail room size.
		 */
		if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
			if (likely(offset <= rte_pktmbuf_tailroom(m)))
				m->data_len += offset;
			else {
				struct rte_mbuf *seg = m;

				while ((seg->next != NULL) &&
					(offset > rte_pktmbuf_tailroom(seg)))
					seg = seg->next;
				seg->data_len += offset;
			}
			m->pkt_len += offset;
		}

		m->vlan_tci = vlan_tag;
	}

	if (m->ol_flags & PKT_TX_TCP_SEG)
		virtio_tx_offload(m);

	tx_q->m_table[tx_q->len++] = m;
	if (enable_stats) {
		vdev->stats.tx_total++;
		vdev->stats.tx++;
	}

	if (unlikely(tx_q->len == MAX_PKT_BURST))
		do_drain_mbuf_table(tx_q);
}
static inline void __attribute__((always_inline))
drain_mbuf_table(struct mbuf_table *tx_q)
{
	static uint64_t prev_tsc;
	uint64_t cur_tsc;

	if (tx_q->len == 0)
		return;

	cur_tsc = rte_rdtsc();
	if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
		prev_tsc = cur_tsc;
		RTE_LOG(DEBUG, VHOST_DATA,
			"TX queue drained after timeout with burst size %u\n",
			tx_q->len);
		do_drain_mbuf_table(tx_q);
	}
}
static inline void __attribute__((always_inline))
drain_eth_rx(struct vhost_dev *vdev)
{
	uint16_t rx_count, enqueue_count;
	struct rte_mbuf *pkts[MAX_PKT_BURST];

	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
				    pkts, MAX_PKT_BURST);
	if (!rx_count)
		return;
	/*
	 * When "enable_retry" is set, here we wait and retry when there
	 * are not enough free slots in the queue to hold @rx_count packets,
	 * to diminish packet loss.
	 */
	if (enable_retry &&
	    unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
			VIRTIO_RXQ))) {
		uint32_t retry;

		for (retry = 0; retry < burst_rx_retry_num; retry++) {
			rte_delay_us(burst_rx_delay_time);
			if (rx_count <= rte_vhost_avail_entries(vdev->vid,
					VIRTIO_RXQ))
				break;
		}
	}

	enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
						pkts, rx_count);
	if (enable_stats) {
		rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
		rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
	}
	free_pkts(pkts, rx_count);
}
static inline void __attribute__((always_inline))
drain_virtio_tx(struct vhost_dev *vdev)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t count;
	uint16_t i;

	count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, mbuf_pool,
					pkts, MAX_PKT_BURST);

	/* setup VMDq for the first packet */
	if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
		if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
			free_pkts(pkts, count);
	}

	for (i = 0; i < count; ++i)
		virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}
/*
 * Main function of vhost-switch. It basically does:
 *
 * for each vhost device {
 *    - drain_eth_rx()
 *      Which drains the host eth Rx queue linked to the vhost device,
 *      and delivers all of them to the guest virtio Rx ring associated
 *      with this vhost device.
 *
 *    - drain_virtio_tx()
 *      Which drains the guest virtio Tx queue and delivers all of them
 *      to the target, which could be another vhost device, or the
 *      physical eth dev. The route is done in function "virtio_tx_route".
 * }
 */
static int
switch_worker(void *arg __rte_unused)
{
	unsigned i;
	unsigned lcore_id = rte_lcore_id();
	struct vhost_dev *vdev;
	struct mbuf_table *tx_q;

	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);

	tx_q = &lcore_tx_queue[lcore_id];
	for (i = 0; i < rte_lcore_count(); i++) {
		if (lcore_ids[i] == lcore_id) {
			tx_q->txq_id = i;
			break;
		}
	}

	while (1) {
		drain_mbuf_table(tx_q);
		/*
		 * Inform the configuration core that we have exited the
		 * linked list and that no devices are in use if requested.
		 */
		if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
			lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;

		/* Process vhost devices. */
		TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
			      lcore_vdev_entry) {
			if (unlikely(vdev->remove)) {
				unlink_vmdq(vdev);
				vdev->ready = DEVICE_SAFE_REMOVE;
				continue;
			}

			if (likely(vdev->ready == DEVICE_RX))
				drain_eth_rx(vdev);

			if (likely(!vdev->remove))
				drain_virtio_tx(vdev);
		}
	}

	return 0;
}
/*
 * Remove a device from the specific data core linked list and from the
 * main linked list. Synchronization occurs through the use of the
 * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
 * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
 */
static void
destroy_device(int vid)
{
	struct vhost_dev *vdev = NULL;
	int lcore;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->vid == vid)
			break;
	}
	if (!vdev)
		return;
	/* Set the remove flag. */
	vdev->remove = 1;
	while (vdev->ready != DEVICE_SAFE_REMOVE)
		rte_pause();

	TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
		     lcore_vdev_entry);
	TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);

	/* Set the dev_removal_flag on each lcore. */
	RTE_LCORE_FOREACH_SLAVE(lcore)
		lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;
	/*
	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
	 * we can be sure that they can no longer access the device removed
	 * from the linked lists and that the devices are no longer in use.
	 */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
			rte_pause();
	}

	lcore_info[vdev->coreid].device_num--;
	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been removed from data core\n",
		vdev->vid);
	rte_free(vdev);
}
/*
 * A new device is added to a data core. First the device is added to the main linked list
 * and then allocated to a specific data core.
 */
static int
new_device(int vid)
{
	int lcore, core_add = 0;
	uint32_t device_num_min = num_devices;
	struct vhost_dev *vdev;

	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
	if (vdev == NULL) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%d) couldn't allocate memory for vhost dev\n",
			vid);
		return -1;
	}
	vdev->vid = vid;

	TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
	vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;

	/* Reset ready flag. */
	vdev->ready = DEVICE_MAC_LEARNING;
	vdev->remove = 0;

	/* Find a suitable lcore to add the device. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		if (lcore_info[lcore].device_num < device_num_min) {
			device_num_min = lcore_info[lcore].device_num;
			core_add = lcore;
		}
	}
	vdev->coreid = core_add;

	TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
			  lcore_vdev_entry);
	lcore_info[vdev->coreid].device_num++;

	/* Disable notifications. */
	rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
	rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been added to data core %d\n",
		vid, vdev->coreid);
	return 0;
}
/*
 * These callbacks allow devices to be added to the data core when configuration
 * has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
	.new_device =  new_device,
	.destroy_device = destroy_device,
};
/*
 * This is a thread that wakes up after a period to print stats if the user has
 * enabled them.
 */
static void
print_stats(void)
{
	struct vhost_dev *vdev;
	uint64_t tx_dropped, rx_dropped;
	uint64_t tx, tx_total, rx, rx_total;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	while (1) {
		sleep(enable_stats);

		/* Clear screen and move to top left */
		printf("%s%s\n", clr, top_left);
		printf("Device statistics =================================\n");

		TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
			tx_total   = vdev->stats.tx_total;
			tx         = vdev->stats.tx;
			tx_dropped = tx_total - tx;

			rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
			rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
			rx_dropped = rx_total - rx;

			printf("Statistics for device %d\n"
				"-----------------------\n"
				"TX total:              %" PRIu64 "\n"
				"TX dropped:            %" PRIu64 "\n"
				"TX successful:         %" PRIu64 "\n"
				"RX total:              %" PRIu64 "\n"
				"RX dropped:            %" PRIu64 "\n"
				"RX successful:         %" PRIu64 "\n",
				vdev->vid,
				tx_total, tx_dropped, tx,
				rx_total, rx_dropped, rx);
		}
		printf("===================================================\n");
	}
}
/* When we receive an INT signal, unregister the vhost driver */
static void
sigint_handler(__rte_unused int signum)
{
	/* Unregister vhost driver. */
	int ret = rte_vhost_driver_unregister(dev_basename);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "vhost driver unregister failure.\n");
	exit(0);
}
/*
 * While creating an mbuf pool, one key thing is to figure out how
 * many mbuf entries are enough for our use. FYI, here are some
 * guidelines:
 *
 * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
 *
 * - For each switch core (a CPU core that does the packet switching), we
 *   also need to make some reservation for receiving the packets from the
 *   virtio Tx queue. How many is enough depends on the usage. It's normally
 *   a simple calculation like the following:
 *
 *       MAX_PKT_BURST * max packet size / mbuf size
 *
 *   So, we definitely need to allocate more mbufs when TSO is enabled.
 *
 * - Similarly, for each switching core, we should serve @nr_rx_desc
 *   mbufs for receiving the packets from the physical NIC device.
 *
 * - We also need to make sure, for each switch core, we have allocated
 *   enough mbufs to fill up the mbuf cache.
 */
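/*
 * Worked example (illustrative; assumes the defaults in this file and a
 * 128-byte RTE_PKTMBUF_HEADROOM): with mbuf_size = MBUF_DATA_SIZE = 2176,
 * mtu = 1500, MAX_PKT_BURST = 32 and nr_rx_desc = 1024, the per-core
 * reservation computed below is (1500 + 2176) * 32 / (2176 - 128) + 1024
 * ~= 57 + 1024 = 1081 mbufs, on top of the nr_queues * nr_rx_desc mbufs
 * reserved for the NIC RX rings.
 */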
static void
create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
	uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
{
	uint32_t nr_mbufs;
	uint32_t nr_mbufs_per_core;
	uint32_t mtu = 1500;

	if (mergeable)
		mtu = 9000;
	if (enable_tso)
		mtu = 64 * 1024;

	nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
			(mbuf_size - RTE_PKTMBUF_HEADROOM);
	nr_mbufs_per_core += nr_rx_desc;
	nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);

	nr_mbufs  = nr_queues * nr_rx_desc;
	nr_mbufs += nr_mbufs_per_core * nr_switch_core;
	nr_mbufs *= nr_port;

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
					    nr_mbuf_cache, 0, mbuf_size,
					    rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
}
/*
 * Main function, does initialisation and calls the per-lcore functions. The CUSE
 * device is also registered here to handle the IOCTLs.
 */
int
main(int argc, char *argv[])
{
	unsigned lcore_id, core_id = 0;
	unsigned nb_ports, valid_num_ports;
	int ret;
	uint8_t portid;
	static pthread_t tid;
	char thread_name[RTE_MAX_THREAD_NAME_LEN];
	uint64_t flags = 0;

	signal(SIGINT, sigint_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = us_vhost_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid argument\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		TAILQ_INIT(&lcore_info[lcore_id].vdev_list);

		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;
	}

	if (rte_lcore_count() > RTE_MAX_LCORE)
		rte_exit(EXIT_FAILURE, "Not enough cores\n");

	/* Get the number of physical ports. */
	nb_ports = rte_eth_dev_count();

	/*
	 * Update the global var NUM_PORTS and global array PORTS
	 * and get value of var VALID_NUM_PORTS according to system ports number
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	/*
	 * FIXME: here we are trying to allocate mbufs big enough for
	 * @MAX_QUEUES, but the truth is we're never going to use that
	 * many queues here. We probably should only do allocation for
	 * those queues we are going to use.
	 */
	create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
			 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);

	if (vm2vm_mode == VM2VM_HARDWARE) {
		/* Enable VT loop back to let L2 switch to do it. */
		vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
		RTE_LOG(DEBUG, VHOST_CONFIG,
			"Enable loop back for L2 switch in vmdq.\n");
	}

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			RTE_LOG(INFO, VHOST_PORT,
				"Skipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid) != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot initialize network ports\n");
	}

	/* Enable stats if the user option is set. */
	if (enable_stats) {
		ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot create print-stats thread\n");

		/* Set thread_name for aid in debugging. */
		snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
		ret = rte_thread_setname(tid, thread_name);
		if (ret != 0)
			RTE_LOG(DEBUG, VHOST_CONFIG,
				"Cannot set print-stats name\n");
	}

	/* Launch all data cores. */
	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_remote_launch(switch_worker, NULL, lcore_id);

	if (mergeable == 0)
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);

	if (client_mode)
		flags |= RTE_VHOST_USER_CLIENT;

	/* Register vhost(cuse or user) driver to handle vhost messages. */
	ret = rte_vhost_driver_register(dev_basename, flags);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "vhost driver register failure.\n");

	rte_vhost_driver_callback_register(&virtio_net_device_ops);

	/* Start CUSE session. */
	rte_vhost_driver_session_start();
	return 0;
}