1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
7 #include <linux/if_ether.h>
8 #include <linux/if_vlan.h>
9 #include <linux/virtio_net.h>
10 #include <linux/virtio_ring.h>
13 #include <sys/eventfd.h>
14 #include <sys/param.h>
17 #include <rte_atomic.h>
18 #include <rte_cycles.h>
19 #include <rte_ethdev.h>
21 #include <rte_string_fns.h>
22 #include <rte_malloc.h>
23 #include <rte_vhost.h>
26 #include <rte_pause.h>
31 #define MAX_QUEUES 128
34 /* the maximum number of external ports supported */
35 #define MAX_SUP_PORTS 1
37 #define MBUF_CACHE_SIZE 128
38 #define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
40 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
42 #define BURST_RX_WAIT_US 15 /* Defines how long we wait between retries on RX */
43 #define BURST_RX_RETRIES 4 /* Number of retries on RX. */
45 #define JUMBO_FRAME_MAX_SIZE 0x2600
47 /* State of virtio device. */
48 #define DEVICE_MAC_LEARNING 0
50 #define DEVICE_SAFE_REMOVE 2
52 /* Configurable number of RX/TX ring descriptors */
53 #define RTE_TEST_RX_DESC_DEFAULT 1024
54 #define RTE_TEST_TX_DESC_DEFAULT 512
56 #define INVALID_PORT_ID 0xFF
58 /* Maximum long option length for option parsing. */
59 #define MAX_LONG_OPT_SZ 64
61 /* mask of enabled ports */
62 static uint32_t enabled_port_mask = 0;
64 /* Promiscuous mode */
65 static uint32_t promiscuous;
67 /* number of devices/queues to support */
68 static uint32_t num_queues = 0;
69 static uint32_t num_devices;
71 static struct rte_mempool *mbuf_pool;
74 /* Enable VM2VM communications. If this is disabled then the MAC address comparison is skipped. */
81 static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
84 static uint32_t enable_stats = 0;
85 /* Enable retries on RX. */
86 static uint32_t enable_retry = 1;
88 /* Disable TX checksum offload */
89 static uint32_t enable_tx_csum;
91 /* Disable TSO offload */
92 static uint32_t enable_tso;
94 static int client_mode;
95 static int dequeue_zero_copy;
97 static int builtin_net_driver;
99 /* Specify the timeout (in microseconds) between retries on RX. */
100 static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
101 /* Specify the number of retries on RX. */
102 static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
104 /* Socket file paths. Can be set by user */
105 static char *socket_files;
106 static int nb_sockets;
108 /* Empty VMDQ configuration structure. Filled in programmatically. */
109 static struct rte_eth_conf vmdq_conf_default = {
111 .mq_mode = ETH_MQ_RX_VMDQ_ONLY,
114 * VLAN strip is necessary for 1G NICs such as the I350;
115 * it fixes a bug where IPv4 forwarding in the guest cannot
116 * forward packets from one virtio device to another.
118 .offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
122 .mq_mode = ETH_MQ_TX_NONE,
123 .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
124 DEV_TX_OFFLOAD_TCP_CKSUM |
125 DEV_TX_OFFLOAD_VLAN_INSERT |
126 DEV_TX_OFFLOAD_MULTI_SEGS |
127 DEV_TX_OFFLOAD_TCP_TSO),
131 * should be overridden separately in code with
135 .nb_queue_pools = ETH_8_POOLS,
136 .enable_default_pool = 0,
139 .pool_map = {{0, 0},},
145 static unsigned lcore_ids[RTE_MAX_LCORE];
146 static uint16_t ports[RTE_MAX_ETHPORTS];
147 static unsigned num_ports = 0; /**< The number of ports specified on the command line */
148 static uint16_t num_pf_queues, num_vmdq_queues;
149 static uint16_t vmdq_pool_base, vmdq_queue_base;
150 static uint16_t queues_per_pool;
152 const uint16_t vlan_tags[] = {
153 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
154 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
155 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
156 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
157 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
158 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
159 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
160 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
163 /* ethernet addresses of ports */
164 static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
166 static struct vhost_dev_tailq_list vhost_dev_list =
167 TAILQ_HEAD_INITIALIZER(vhost_dev_list);
169 static struct lcore_info lcore_info[RTE_MAX_LCORE];
171 /* Used for queueing bursts of TX packets. */
175 struct rte_mbuf *m_table[MAX_PKT_BURST];
178 /* TX queue for each data core. */
179 struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
181 #define MBUF_TABLE_DRAIN_TSC ((rte_get_tsc_hz() + US_PER_S - 1) \
182 / US_PER_S * BURST_TX_DRAIN_US)
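/*
 * Worked example: assuming a 2.0 GHz TSC (rte_get_tsc_hz() == 2000000000),
 * the macro above evaluates to
 *     (2000000000 + 1000000 - 1) / 1000000 * 100 = 2000 * 100 = 200000
 * i.e. roughly 200,000 TSC cycles per ~100 us drain period.
 */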
186 * Builds up the correct configuration for VMDQ VLAN pool map
187 * according to the pool & queue limits.
190 get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
192 struct rte_eth_vmdq_rx_conf conf;
193 struct rte_eth_vmdq_rx_conf *def_conf =
194 &vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
197 memset(&conf, 0, sizeof(conf));
198 conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
199 conf.nb_pool_maps = num_devices;
200 conf.enable_loop_back = def_conf->enable_loop_back;
201 conf.rx_mode = def_conf->rx_mode;
203 for (i = 0; i < conf.nb_pool_maps; i++) {
204 conf.pool_map[i].vlan_id = vlan_tags[i];
205 conf.pool_map[i].pools = (1UL << i);
208 (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
209 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
210 sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
215 * Initialises a given port using global settings and with the RX buffers
216 * coming from the mbuf_pool passed as a parameter
219 port_init(uint16_t port)
221 struct rte_eth_dev_info dev_info;
222 struct rte_eth_conf port_conf;
223 struct rte_eth_rxconf *rxconf;
224 struct rte_eth_txconf *txconf;
225 int16_t rx_rings, tx_rings;
226 uint16_t rx_ring_size, tx_ring_size;
230 /* The max pool number from dev_info is used to validate the pool number specified on the command line. */
231 rte_eth_dev_info_get(port, &dev_info);
233 rxconf = &dev_info.default_rxconf;
234 txconf = &dev_info.default_txconf;
235 rxconf->rx_drop_en = 1;
237 /* Configure the number of supported virtio devices based on VMDQ limits. */
238 num_devices = dev_info.max_vmdq_pools;
240 rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
241 tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
244 * When dequeue zero copy is enabled, the guest Tx used vring will be
245 * updated only when the corresponding mbuf is freed. Thus, the nb_tx_desc
246 * (tx_ring_size here) must be small enough so that the driver will
247 * hit the free threshold easily and free mbufs in a timely manner.
248 * Otherwise, the guest Tx vring would be starved.
250 if (dequeue_zero_copy)
253 tx_rings = (uint16_t)rte_lcore_count();
255 /* Get port configuration. */
256 retval = get_eth_conf(&port_conf, num_devices);
259 /* NIC queues are divided into PF queues and VMDQ queues. */
260 num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
261 queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
262 num_vmdq_queues = num_devices * queues_per_pool;
263 num_queues = num_pf_queues + num_vmdq_queues;
264 vmdq_queue_base = dev_info.vmdq_queue_base;
265 vmdq_pool_base = dev_info.vmdq_pool_base;
266 printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
267 num_pf_queues, num_devices, queues_per_pool);
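/*
 * Worked example (the figures depend on the NIC): a device reporting
 * max_rx_queues = 128, vmdq_queue_num = 128 and max_vmdq_pools = 64
 * would yield num_pf_queues = 0, queues_per_pool = 2 and, with all 64
 * pools used as virtio devices, num_vmdq_queues = 128.
 */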
269 if (!rte_eth_dev_is_valid_port(port))
272 rx_rings = (uint16_t)dev_info.max_rx_queues;
273 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
274 port_conf.txmode.offloads |=
275 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
276 /* Configure ethernet device. */
277 retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
279 RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
280 port, strerror(-retval));
284 retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
287 RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
288 "for port %u: %s.\n", port, strerror(-retval));
291 if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
292 RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
293 "for Rx queues on port %u.\n", port);
297 /* Setup the queues. */
298 rxconf->offloads = port_conf.rxmode.offloads;
299 for (q = 0; q < rx_rings; q++) {
300 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
301 rte_eth_dev_socket_id(port),
305 RTE_LOG(ERR, VHOST_PORT,
306 "Failed to setup rx queue %u of port %u: %s.\n",
307 q, port, strerror(-retval));
311 txconf->offloads = port_conf.txmode.offloads;
312 for (q = 0; q < tx_rings; q++) {
313 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
314 rte_eth_dev_socket_id(port),
317 RTE_LOG(ERR, VHOST_PORT,
318 "Failed to setup tx queue %u of port %u: %s.\n",
319 q, port, strerror(-retval));
324 /* Start the device. */
325 retval = rte_eth_dev_start(port);
327 RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
328 port, strerror(-retval));
333 rte_eth_promiscuous_enable(port);
335 rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
336 RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
337 RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
338 " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
340 vmdq_ports_eth_addr[port].addr_bytes[0],
341 vmdq_ports_eth_addr[port].addr_bytes[1],
342 vmdq_ports_eth_addr[port].addr_bytes[2],
343 vmdq_ports_eth_addr[port].addr_bytes[3],
344 vmdq_ports_eth_addr[port].addr_bytes[4],
345 vmdq_ports_eth_addr[port].addr_bytes[5]);
351 * Set socket file path.
354 us_vhost_parse_socket_path(const char *q_arg)
358 /* Reject socket paths that are too long to fit in PATH_MAX. */
359 if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
363 socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
364 if (socket_files == NULL) {
369 snprintf(socket_files + nb_sockets * PATH_MAX, PATH_MAX, "%s", q_arg);
376 * Parse the portmask provided at run time.
379 parse_portmask(const char *portmask)
386 /* parse hexadecimal string */
387 pm = strtoul(portmask, &end, 16);
388 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
399 * Parse numeric options at run time.
402 parse_num_opt(const char *q_arg, uint32_t max_valid_value)
409 /* parse unsigned int string */
410 num = strtoul(q_arg, &end, 10);
411 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
414 if (num > max_valid_value)
425 us_vhost_usage(const char *prgname)
427 RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
429 " --rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
430 " --socket-file <path>\n"
432 " -p PORTMASK: Set mask for ports to be used by application\n"
433 " --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
434 " --rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destintation queue is full\n"
435 " --rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\n"
436 " --rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\n"
437 " --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
438 " --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
439 " --socket-file: The path of the socket file.\n"
440 " --tx-csum [0|1] disable/enable TX checksum offload.\n"
441 " --tso [0|1] disable/enable TCP segment offload.\n"
442 " --client register a vhost-user socket as client mode.\n"
443 " --dequeue-zero-copy enables dequeue zero copy\n",
448 * Parse the arguments given in the command line of the application.
451 us_vhost_parse_args(int argc, char **argv)
456 const char *prgname = argv[0];
457 static struct option long_option[] = {
458 {"vm2vm", required_argument, NULL, 0},
459 {"rx-retry", required_argument, NULL, 0},
460 {"rx-retry-delay", required_argument, NULL, 0},
461 {"rx-retry-num", required_argument, NULL, 0},
462 {"mergeable", required_argument, NULL, 0},
463 {"stats", required_argument, NULL, 0},
464 {"socket-file", required_argument, NULL, 0},
465 {"tx-csum", required_argument, NULL, 0},
466 {"tso", required_argument, NULL, 0},
467 {"client", no_argument, &client_mode, 1},
468 {"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
469 {"builtin-net-driver", no_argument, &builtin_net_driver, 1},
473 /* Parse command line */
474 while ((opt = getopt_long(argc, argv, "p:P",
475 long_option, &option_index)) != EOF) {
479 enabled_port_mask = parse_portmask(optarg);
480 if (enabled_port_mask == 0) {
481 RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
482 us_vhost_usage(prgname);
489 vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
490 ETH_VMDQ_ACCEPT_BROADCAST |
491 ETH_VMDQ_ACCEPT_MULTICAST;
496 /* Enable/disable vm2vm comms. */
497 if (!strncmp(long_option[option_index].name, "vm2vm",
499 ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
501 RTE_LOG(INFO, VHOST_CONFIG,
502 "Invalid argument for "
504 us_vhost_usage(prgname);
507 vm2vm_mode = (vm2vm_type)ret;
511 /* Enable/disable retries on RX. */
512 if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
513 ret = parse_num_opt(optarg, 1);
515 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
516 us_vhost_usage(prgname);
523 /* Enable/disable TX checksum offload. */
524 if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
525 ret = parse_num_opt(optarg, 1);
527 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
528 us_vhost_usage(prgname);
531 enable_tx_csum = ret;
534 /* Enable/disable TSO offload. */
535 if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
536 ret = parse_num_opt(optarg, 1);
538 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
539 us_vhost_usage(prgname);
545 /* Specify the retry delay time (in microseconds) on RX. */
546 if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
547 ret = parse_num_opt(optarg, INT32_MAX);
549 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
550 us_vhost_usage(prgname);
553 burst_rx_delay_time = ret;
557 /* Specify the number of retries on RX. */
558 if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
559 ret = parse_num_opt(optarg, INT32_MAX);
561 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
562 us_vhost_usage(prgname);
565 burst_rx_retry_num = ret;
569 /* Enable/disable RX mergeable buffers. */
570 if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
571 ret = parse_num_opt(optarg, 1);
573 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
574 us_vhost_usage(prgname);
579 vmdq_conf_default.rxmode.offloads |=
580 DEV_RX_OFFLOAD_JUMBO_FRAME;
581 vmdq_conf_default.rxmode.max_rx_pkt_len
582 = JUMBO_FRAME_MAX_SIZE;
587 /* Enable/disable stats. */
588 if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
589 ret = parse_num_opt(optarg, INT32_MAX);
591 RTE_LOG(INFO, VHOST_CONFIG,
592 "Invalid argument for stats [0..N]\n");
593 us_vhost_usage(prgname);
600 /* Set socket file path. */
601 if (!strncmp(long_option[option_index].name,
602 "socket-file", MAX_LONG_OPT_SZ)) {
603 if (us_vhost_parse_socket_path(optarg) == -1) {
604 RTE_LOG(INFO, VHOST_CONFIG,
605 "Invalid argument for socket name (Max %d characters)\n",
607 us_vhost_usage(prgname);
614 /* Invalid option - print options. */
616 us_vhost_usage(prgname);
621 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
622 if (enabled_port_mask & (1 << i))
623 ports[num_ports++] = i;
626 if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
627 RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
628 "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
636 * Update the global variable num_ports and the array ports according to the
637 * number of system ports, and return the number of valid ports
639 static unsigned check_ports_num(unsigned nb_ports)
641 unsigned valid_num_ports = num_ports;
644 if (num_ports > nb_ports) {
645 RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
646 num_ports, nb_ports);
647 num_ports = nb_ports;
650 for (portid = 0; portid < num_ports; portid ++) {
651 if (!rte_eth_dev_is_valid_port(ports[portid])) {
652 RTE_LOG(INFO, VHOST_PORT,
653 "\nSpecified port ID(%u) is not valid\n",
655 ports[portid] = INVALID_PORT_ID;
659 return valid_num_ports;
662 static __rte_always_inline struct vhost_dev *
663 find_vhost_dev(struct ether_addr *mac)
665 struct vhost_dev *vdev;
667 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
668 if (vdev->ready == DEVICE_RX &&
669 is_same_ether_addr(mac, &vdev->mac_address))
677 * This function learns the MAC address of the device and registers it along with a
678 * VLAN tag in a VMDQ pool.
681 link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
683 struct ether_hdr *pkt_hdr;
686 /* Learn MAC address of guest device from packet */
687 pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
689 if (find_vhost_dev(&pkt_hdr->s_addr)) {
690 RTE_LOG(ERR, VHOST_DATA,
691 "(%d) device is using a registered MAC!\n",
696 for (i = 0; i < ETHER_ADDR_LEN; i++)
697 vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];
699 /* vlan_tag currently uses the device_id. */
700 vdev->vlan_tag = vlan_tags[vdev->vid];
702 /* Print out VMDQ registration info. */
703 RTE_LOG(INFO, VHOST_DATA,
704 "(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
706 vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
707 vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
708 vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
711 /* Register the MAC address. */
712 ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
713 (uint32_t)vdev->vid + vmdq_pool_base);
715 RTE_LOG(ERR, VHOST_DATA,
716 "(%d) failed to add device MAC address to VMDQ\n",
719 rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);
721 /* Set device as ready for RX. */
722 vdev->ready = DEVICE_RX;
728 * Removes the MAC address and VLAN tag from the VMDQ. Ensures that nothing is adding buffers to the RX
729 * queue before disabling RX on the device.
732 unlink_vmdq(struct vhost_dev *vdev)
736 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
738 if (vdev->ready == DEVICE_RX) {
739 /* Clear MAC and VLAN settings. */
740 rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
741 for (i = 0; i < 6; i++)
742 vdev->mac_address.addr_bytes[i] = 0;
746 /* Clear out the receive buffers. */
747 rx_count = rte_eth_rx_burst(ports[0],
748 (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
751 for (i = 0; i < rx_count; i++)
752 rte_pktmbuf_free(pkts_burst[i]);
754 rx_count = rte_eth_rx_burst(ports[0],
755 (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
758 vdev->ready = DEVICE_MAC_LEARNING;
762 static __rte_always_inline void
763 virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
768 if (builtin_net_driver) {
769 ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
771 ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
775 rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
776 rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
777 src_vdev->stats.tx_total++;
778 src_vdev->stats.tx += ret;
783 * Check if the packet destination MAC address is for a local device. If so, put
784 * the packet on that device's RX queue. If not, return.
786 static __rte_always_inline int
787 virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
789 struct ether_hdr *pkt_hdr;
790 struct vhost_dev *dst_vdev;
792 pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
794 dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
798 if (vdev->vid == dst_vdev->vid) {
799 RTE_LOG_DP(DEBUG, VHOST_DATA,
800 "(%d) TX: src and dst MAC is same. Dropping packet.\n",
805 RTE_LOG_DP(DEBUG, VHOST_DATA,
806 "(%d) TX: MAC address is local\n", dst_vdev->vid);
808 if (unlikely(dst_vdev->remove)) {
809 RTE_LOG_DP(DEBUG, VHOST_DATA,
810 "(%d) device is marked for removal\n", dst_vdev->vid);
814 virtio_xmit(dst_vdev, vdev, m);
819 * Check if the destination MAC of a packet belongs to a local VM,
820 * and if so get its VLAN tag and length offset.
822 static __rte_always_inline int
823 find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
824 uint32_t *offset, uint16_t *vlan_tag)
826 struct vhost_dev *dst_vdev;
827 struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
829 dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
833 if (vdev->vid == dst_vdev->vid) {
834 RTE_LOG_DP(DEBUG, VHOST_DATA,
835 "(%d) TX: src and dst MAC is same. Dropping packet.\n",
841 * HW VLAN strip will reduce the packet length
842 * by the length of the VLAN tag, so the packet
843 * length needs to be restored by adding it back.
846 *vlan_tag = vlan_tags[vdev->vid];
848 RTE_LOG_DP(DEBUG, VHOST_DATA,
849 "(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
850 vdev->vid, dst_vdev->vid, *vlan_tag);
856 get_psd_sum(void *l3_hdr, uint64_t ol_flags)
858 if (ol_flags & PKT_TX_IPV4)
859 return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
860 else /* assume ethertype == ETHER_TYPE_IPv6 */
861 return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
864 static void virtio_tx_offload(struct rte_mbuf *m)
867 struct ipv4_hdr *ipv4_hdr = NULL;
868 struct tcp_hdr *tcp_hdr = NULL;
869 struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
871 l3_hdr = (char *)eth_hdr + m->l2_len;
873 if (m->ol_flags & PKT_TX_IPV4) {
875 ipv4_hdr->hdr_checksum = 0;
876 m->ol_flags |= PKT_TX_IP_CKSUM;
879 tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
880 tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
884 free_pkts(struct rte_mbuf **pkts, uint16_t n)
887 rte_pktmbuf_free(pkts[n]);
890 static __rte_always_inline void
891 do_drain_mbuf_table(struct mbuf_table *tx_q)
895 count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
896 tx_q->m_table, tx_q->len);
897 if (unlikely(count < tx_q->len))
898 free_pkts(&tx_q->m_table[count], tx_q->len - count);
904 * This function routes the TX packet to the correct interface. This
905 * may be a local device or the physical port.
907 static __rte_always_inline void
908 virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
910 struct mbuf_table *tx_q;
912 const uint16_t lcore_id = rte_lcore_id();
913 struct ether_hdr *nh;
916 nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
917 if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
918 struct vhost_dev *vdev2;
920 TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
922 virtio_xmit(vdev2, vdev, m);
927 /* Check if the destination is a local VM. */
928 if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
933 if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
934 if (unlikely(find_local_dest(vdev, m, &offset,
941 RTE_LOG_DP(DEBUG, VHOST_DATA,
942 "(%d) TX: MAC address is external\n", vdev->vid);
946 /* Add the packet to the port TX queue. */
947 tx_q = &lcore_tx_queue[lcore_id];
949 nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
950 if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
951 /* Guest has inserted the vlan tag. */
952 struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
953 uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
954 if ((vm2vm_mode == VM2VM_HARDWARE) &&
955 (vh->vlan_tci != vlan_tag_be))
956 vh->vlan_tci = vlan_tag_be;
958 m->ol_flags |= PKT_TX_VLAN_PKT;
961 * Find the right segment to adjust the data length when the offset is
962 * bigger than the tailroom size.
964 if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
965 if (likely(offset <= rte_pktmbuf_tailroom(m)))
966 m->data_len += offset;
968 struct rte_mbuf *seg = m;
970 while ((seg->next != NULL) &&
971 (offset > rte_pktmbuf_tailroom(seg)))
974 seg->data_len += offset;
976 m->pkt_len += offset;
979 m->vlan_tci = vlan_tag;
982 if (m->ol_flags & PKT_TX_TCP_SEG)
983 virtio_tx_offload(m);
985 tx_q->m_table[tx_q->len++] = m;
987 vdev->stats.tx_total++;
991 if (unlikely(tx_q->len == MAX_PKT_BURST))
992 do_drain_mbuf_table(tx_q);
996 static __rte_always_inline void
997 drain_mbuf_table(struct mbuf_table *tx_q)
999 static uint64_t prev_tsc;
1005 cur_tsc = rte_rdtsc();
1006 if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
1009 RTE_LOG_DP(DEBUG, VHOST_DATA,
1010 "TX queue drained after timeout with burst size %u\n",
1012 do_drain_mbuf_table(tx_q);
1016 static __rte_always_inline void
1017 drain_eth_rx(struct vhost_dev *vdev)
1019 uint16_t rx_count, enqueue_count;
1020 struct rte_mbuf *pkts[MAX_PKT_BURST];
1022 rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
1023 pkts, MAX_PKT_BURST);
1028 * When "enable_retry" is set, here we wait and retry when there
1029 * is no enough free slots in the queue to hold @rx_count packets,
1030 * to diminish packet loss.
1033 unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
1037 for (retry = 0; retry < burst_rx_retry_num; retry++) {
1038 rte_delay_us(burst_rx_delay_time);
1039 if (rx_count <= rte_vhost_avail_entries(vdev->vid,
1045 if (builtin_net_driver) {
1046 enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
1049 enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
1053 rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
1054 rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
1057 free_pkts(pkts, rx_count);
1060 static __rte_always_inline void
1061 drain_virtio_tx(struct vhost_dev *vdev)
1063 struct rte_mbuf *pkts[MAX_PKT_BURST];
1067 if (builtin_net_driver) {
1068 count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
1069 pkts, MAX_PKT_BURST);
1071 count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
1072 mbuf_pool, pkts, MAX_PKT_BURST);
1075 /* Set up VMDQ for the first packet. */
1076 if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
1077 if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
1078 free_pkts(pkts, count);
1081 for (i = 0; i < count; ++i)
1082 virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
1086 * Main function of vhost-switch. It basically does:
1088 * for each vhost device {
1091 * Which drains the host eth Rx queue linked to the vhost device,
1092 * and delivers all of the packets to the guest virtio Rx ring associated with
1093 * this vhost device.
1095 * - drain_virtio_tx()
1097 * Which drains the guest virtio Tx queue and delivers all of the packets
1098 * to the target, which could be another vhost device or the
1099 * physical eth dev. Routing is done in the function "virtio_tx_route".
1103 switch_worker(void *arg __rte_unused)
1106 unsigned lcore_id = rte_lcore_id();
1107 struct vhost_dev *vdev;
1108 struct mbuf_table *tx_q;
1110 RTE_LOG(INFO, VHOST_DATA, "Procesing on Core %u started\n", lcore_id);
1112 tx_q = &lcore_tx_queue[lcore_id];
1113 for (i = 0; i < rte_lcore_count(); i++) {
1114 if (lcore_ids[i] == lcore_id) {
1121 drain_mbuf_table(tx_q);
1124 * Inform the configuration core that we have exited the
1125 * linked list and that no devices are in use if requested.
1127 if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
1128 lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;
1131 * Process vhost devices
1133 TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
1135 if (unlikely(vdev->remove)) {
1137 vdev->ready = DEVICE_SAFE_REMOVE;
1141 if (likely(vdev->ready == DEVICE_RX))
1144 if (likely(!vdev->remove))
1145 drain_virtio_tx(vdev);
1153 * Remove a device from the specific data core linked list and from the
1154 * main linked list. Synchronization occurs through the use of the
1155 * lcore dev_removal_flag. The device is made volatile here to avoid reordering
1156 * of dev->remove = 1, which could cause an infinite loop in the rte_pause loop.
1159 destroy_device(int vid)
1161 struct vhost_dev *vdev = NULL;
1164 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1165 if (vdev->vid == vid)
1170 /* Set the remove flag. */
1172 while(vdev->ready != DEVICE_SAFE_REMOVE) {
1176 if (builtin_net_driver)
1177 vs_vhost_net_remove(vdev);
1179 TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
1181 TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
1184 /* Set the dev_removal_flag on each lcore. */
1185 RTE_LCORE_FOREACH_SLAVE(lcore)
1186 lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;
1189 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
1190 * we can be sure that they can no longer access the device removed
1191 * from the linked lists and that the devices are no longer in use.
1193 RTE_LCORE_FOREACH_SLAVE(lcore) {
1194 while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
1198 lcore_info[vdev->coreid].device_num--;
1200 RTE_LOG(INFO, VHOST_DATA,
1201 "(%d) device has been removed from data core\n",
1208 * A new device is added to a data core. First the device is added to the main linked list
1209 * and is then allocated to a specific data core.
1214 int lcore, core_add = 0;
1215 uint32_t device_num_min = num_devices;
1216 struct vhost_dev *vdev;
1218 vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
1220 RTE_LOG(INFO, VHOST_DATA,
1221 "(%d) couldn't allocate memory for vhost dev\n",
1227 if (builtin_net_driver)
1228 vs_vhost_net_setup(vdev);
1230 TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
1231 vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
1233 /* Reset the ready flag. */
1234 vdev->ready = DEVICE_MAC_LEARNING;
1237 /* Find a suitable lcore to add the device. */
1238 RTE_LCORE_FOREACH_SLAVE(lcore) {
1239 if (lcore_info[lcore].device_num < device_num_min) {
1240 device_num_min = lcore_info[lcore].device_num;
1244 vdev->coreid = core_add;
1246 TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
1248 lcore_info[vdev->coreid].device_num++;
1250 /* Disable notifications. */
1251 rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
1252 rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
1254 RTE_LOG(INFO, VHOST_DATA,
1255 "(%d) device has been added to data core %d\n",
1262 * These callbacks allow devices to be added to the data core when configuration
1263 * has been fully completed.
1265 static const struct vhost_device_ops virtio_net_device_ops =
1267 .new_device = new_device,
1268 .destroy_device = destroy_device,
1272 * This is a thread that will wake up after a period to print stats if the user has
1276 print_stats(__rte_unused void *arg)
1278 struct vhost_dev *vdev;
1279 uint64_t tx_dropped, rx_dropped;
1280 uint64_t tx, tx_total, rx, rx_total;
1281 const char clr[] = { 27, '[', '2', 'J', '\0' };
1282 const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
1285 sleep(enable_stats);
1287 /* Clear screen and move to top left */
1288 printf("%s%s\n", clr, top_left);
1289 printf("Device statistics =================================\n");
1291 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1292 tx_total = vdev->stats.tx_total;
1293 tx = vdev->stats.tx;
1294 tx_dropped = tx_total - tx;
1296 rx_total = rte_atomic64_read(&vdev->stats.rx_total_atomic);
1297 rx = rte_atomic64_read(&vdev->stats.rx_atomic);
1298 rx_dropped = rx_total - rx;
1300 printf("Statistics for device %d\n"
1301 "-----------------------\n"
1302 "TX total: %" PRIu64 "\n"
1303 "TX dropped: %" PRIu64 "\n"
1304 "TX successful: %" PRIu64 "\n"
1305 "RX total: %" PRIu64 "\n"
1306 "RX dropped: %" PRIu64 "\n"
1307 "RX successful: %" PRIu64 "\n",
1309 tx_total, tx_dropped, tx,
1310 rx_total, rx_dropped, rx);
1313 printf("===================================================\n");
1320 unregister_drivers(int socket_num)
1324 for (i = 0; i < socket_num; i++) {
1325 ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
1327 RTE_LOG(ERR, VHOST_CONFIG,
1328 "Fail to unregister vhost driver for %s.\n",
1329 socket_files + i * PATH_MAX);
1333 /* When we receive a SIGINT signal, unregister the vhost driver. */
1335 sigint_handler(__rte_unused int signum)
1337 /* Unregister vhost driver. */
1338 unregister_drivers(nb_sockets);
1344 * While creating an mbuf pool, one key thing is to figure out how
1345 * many mbuf entries are enough for our use. FYI, here are some
1348 * - Each rx queue would reserve @nr_rx_desc mbufs at the queue setup stage
1350 * - For each switch core (a CPU core that does the packet switching), we
1351 * also need to make some reservation for receiving the packets from the
1352 * virtio Tx queue. How many is enough depends on the usage. It's normally
1353 * a simple calculation like the following:
1355 * MAX_PKT_BURST * max packet size / mbuf size
1357 * So, we definitely need to allocate more mbufs when TSO is enabled.
1359 * - Similarly, for each switching core, we should reserve @nr_rx_desc
1360 * mbufs for receiving the packets from the physical NIC device.
1362 * - We also need to make sure that, for each switch core, we have allocated
1363 * enough mbufs to fill up the mbuf cache.
1366 create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
1367 uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
1370 uint32_t nr_mbufs_per_core;
1371 uint32_t mtu = 1500;
1378 nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST /
1379 (mbuf_size - RTE_PKTMBUF_HEADROOM);
1380 nr_mbufs_per_core += nr_rx_desc;
1381 nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
1383 nr_mbufs = nr_queues * nr_rx_desc;
1384 nr_mbufs += nr_mbufs_per_core * nr_switch_core;
1385 nr_mbufs *= nr_port;
1387 mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
1388 nr_mbuf_cache, 0, mbuf_size,
1390 if (mbuf_pool == NULL)
1391 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
1395 * Main function, does initialisation and calls the per-lcore functions.
1398 main(int argc, char *argv[])
1400 unsigned lcore_id, core_id = 0;
1401 unsigned nb_ports, valid_num_ports;
1404 static pthread_t tid;
1407 signal(SIGINT, sigint_handler);
1410 ret = rte_eal_init(argc, argv);
1412 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
1416 /* parse app arguments */
1417 ret = us_vhost_parse_args(argc, argv);
1419 rte_exit(EXIT_FAILURE, "Invalid argument\n");
1421 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1422 TAILQ_INIT(&lcore_info[lcore_id].vdev_list);
1424 if (rte_lcore_is_enabled(lcore_id))
1425 lcore_ids[core_id++] = lcore_id;
1428 if (rte_lcore_count() > RTE_MAX_LCORE)
1429 rte_exit(EXIT_FAILURE,"Not enough cores\n");
1431 /* Get the number of physical ports. */
1432 nb_ports = rte_eth_dev_count_avail();
1435 * Update the global variable num_ports and the global array ports,
1436 * and get the value of valid_num_ports according to the number of system ports
1438 valid_num_ports = check_ports_num(nb_ports);
1440 if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
1441 RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
1442 "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
1447 * FIXME: here we are trying to allocate mbufs big enough for
1448 * @MAX_QUEUES, but the truth is we're never going to use that
1449 * many queues here. We probably should only do allocation for
1450 * those queues we are going to use.
1452 create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
1453 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
1455 if (vm2vm_mode == VM2VM_HARDWARE) {
1456 /* Enable VT loopback to let the L2 switch do the forwarding. */
1457 vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
1458 RTE_LOG(DEBUG, VHOST_CONFIG,
1459 "Enable loop back for L2 switch in vmdq.\n");
1462 /* initialize all ports */
1463 RTE_ETH_FOREACH_DEV(portid) {
1464 /* skip ports that are not enabled */
1465 if ((enabled_port_mask & (1 << portid)) == 0) {
1466 RTE_LOG(INFO, VHOST_PORT,
1467 "Skipping disabled port %d\n", portid);
1470 if (port_init(portid) != 0)
1471 rte_exit(EXIT_FAILURE,
1472 "Cannot initialize network ports\n");
1475 /* Enable stats if the user option is set. */
1477 ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
1480 rte_exit(EXIT_FAILURE,
1481 "Cannot create print-stats thread\n");
1484 /* Launch all data cores. */
1485 RTE_LCORE_FOREACH_SLAVE(lcore_id)
1486 rte_eal_remote_launch(switch_worker, NULL, lcore_id);
1489 flags |= RTE_VHOST_USER_CLIENT;
1491 if (dequeue_zero_copy)
1492 flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
1494 /* Register vhost user driver to handle vhost messages. */
1495 for (i = 0; i < nb_sockets; i++) {
1496 char *file = socket_files + i * PATH_MAX;
1497 ret = rte_vhost_driver_register(file, flags);
1499 unregister_drivers(i);
1500 rte_exit(EXIT_FAILURE,
1501 "vhost driver register failure.\n");
1504 if (builtin_net_driver)
1505 rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);
1507 if (mergeable == 0) {
1508 rte_vhost_driver_disable_features(file,
1509 1ULL << VIRTIO_NET_F_MRG_RXBUF);
1512 if (enable_tx_csum == 0) {
1513 rte_vhost_driver_disable_features(file,
1514 1ULL << VIRTIO_NET_F_CSUM);
1517 if (enable_tso == 0) {
1518 rte_vhost_driver_disable_features(file,
1519 1ULL << VIRTIO_NET_F_HOST_TSO4);
1520 rte_vhost_driver_disable_features(file,
1521 1ULL << VIRTIO_NET_F_HOST_TSO6);
1522 rte_vhost_driver_disable_features(file,
1523 1ULL << VIRTIO_NET_F_GUEST_TSO4);
1524 rte_vhost_driver_disable_features(file,
1525 1ULL << VIRTIO_NET_F_GUEST_TSO6);
1529 rte_vhost_driver_enable_features(file,
1530 1ULL << VIRTIO_NET_F_CTRL_RX);
1533 ret = rte_vhost_driver_callback_register(file,
1534 &virtio_net_device_ops);
1536 rte_exit(EXIT_FAILURE,
1537 "failed to register vhost driver callbacks.\n");
1540 if (rte_vhost_driver_start(file) < 0) {
1541 rte_exit(EXIT_FAILURE,
1542 "failed to start vhost driver.\n");
1546 RTE_LCORE_FOREACH_SLAVE(lcore_id)
1547 rte_eal_wait_lcore(lcore_id);