/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <arpa/inet.h>
#include <errno.h>
#include <getopt.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <signal.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/param.h>
#include <unistd.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_vhost.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_pause.h>

#include "main.h"
#define MAX_QUEUES 128

/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

#define MBUF_CACHE_SIZE	128
#define MBUF_DATA_SIZE	RTE_MBUF_DEFAULT_BUF_SIZE

#define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */

#define BURST_RX_WAIT_US 15	/* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4	/* Number of retries on RX. */
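
/* 0x2600 is 9728 bytes, i.e. room for a 9000-byte jumbo payload plus L2 headers. */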
#define JUMBO_FRAME_MAX_SIZE	0x2600

/* State of virtio device. */
#define DEVICE_MAC_LEARNING	0
#define DEVICE_RX		1
#define DEVICE_SAFE_REMOVE	2
/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Promiscuous mode */
static uint32_t promiscuous;

/* number of devices/queues to support */
static uint32_t num_queues = 0;
static uint32_t num_devices;

static struct rte_mempool *mbuf_pool;
static int mergeable;
/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
typedef enum {
	VM2VM_DISABLED = 0,
	VM2VM_SOFTWARE = 1,
	VM2VM_HARDWARE = 2,
	VM2VM_LAST
} vm2vm_type;
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
/* Enable stats. */
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;

/* Disable TX checksum offload */
static uint32_t enable_tx_csum;

/* Disable TSO offload */
static uint32_t enable_tso;

static int client_mode;
static int dequeue_zero_copy;

static int builtin_net_driver;
/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;

/* Socket file paths. Can be set by user */
static char *socket_files;
static int nb_sockets;
/* Empty VMDQ configuration structure. Filled in programmatically */
static struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
		.split_hdr_size = 0,
		/*
		 * VLAN strip is necessary for 1G NICs such as the I350;
		 * it fixes a bug where IPv4 forwarding in the guest could
		 * not forward packets from one virtio dev to another.
		 */
		.offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
	},

	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
			     DEV_TX_OFFLOAD_TCP_CKSUM |
			     DEV_TX_OFFLOAD_VLAN_INSERT |
			     DEV_TX_OFFLOAD_MULTI_SEGS |
			     DEV_TX_OFFLOAD_TCP_TSO),
	},
	.rx_adv_conf = {
		/*
		 * should be overridden separately in code with
		 * appropriate values
		 */
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
};
static unsigned lcore_ids[RTE_MAX_LCORE];
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified in command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;
const uint16_t vlan_tags[] = {
	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
	1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};
/* ethernet addresses of ports */
static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

static struct vhost_dev_tailq_list vhost_dev_list =
	TAILQ_HEAD_INITIALIZER(vhost_dev_list);

static struct lcore_info lcore_info[RTE_MAX_LCORE];
/* Used for queueing bursts of TX packets. */
struct mbuf_table {
	unsigned len;
	unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
#define MBUF_TABLE_DRAIN_TSC	((rte_get_tsc_hz() + US_PER_S - 1) \
				 / US_PER_S * BURST_TX_DRAIN_US)
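
/*
 * Worked example (assuming a 2 GHz TSC, purely illustrative):
 * (2000000000 + 1000000 - 1) / 1000000 * 100 = 200000 cycles,
 * i.e. the per-core TX queue is force-drained roughly every 100 us.
 */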
/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
	struct rte_eth_vmdq_rx_conf conf;
	struct rte_eth_vmdq_rx_conf *def_conf =
		&vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
	unsigned i;

	memset(&conf, 0, sizeof(conf));
	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
	conf.nb_pool_maps = num_devices;
	conf.enable_loop_back = def_conf->enable_loop_back;
	conf.rx_mode = def_conf->rx_mode;

	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << i);
	}

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));

	return 0;
}
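
/*
 * For instance, with num_devices == 8 the loop above yields a one-to-one
 * VLAN-to-pool mapping (a sketch of the resulting table, not something the
 * application prints):
 *
 *     pool_map[0] = { .vlan_id = 1000, .pools = 0x01 }
 *     pool_map[1] = { .vlan_id = 1001, .pools = 0x02 }
 *     ...
 *     pool_map[7] = { .vlan_id = 1007, .pools = 0x80 }
 */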
/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
static inline int
port_init(uint16_t port)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_txconf *txconf;
	int16_t rx_rings, tx_rings;
	uint16_t rx_ring_size, tx_ring_size;
	int retval;
	uint16_t q;

	/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		RTE_LOG(ERR, VHOST_PORT,
			"Error during getting device (port %u) info: %s\n",
			port, strerror(-retval));

		return retval;
	}
	rxconf = &dev_info.default_rxconf;
	txconf = &dev_info.default_txconf;
	rxconf->rx_drop_en = 1;

	/* Configure the number of supported virtio devices based on VMDQ limits */
	num_devices = dev_info.max_vmdq_pools;

	rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
	tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;

	/*
	 * When dequeue zero copy is enabled, guest Tx used vring will be
	 * updated only when corresponding mbuf is freed. Thus, the nb_tx_desc
	 * (tx_ring_size here) must be small enough so that the driver will
	 * hit the free threshold easily and free mbufs in a timely manner.
	 * Otherwise, guest Tx vring would be starved.
	 */
	if (dequeue_zero_copy)
		tx_ring_size = 64;

	tx_rings = (uint16_t)rte_lcore_count();
	/* Get port configuration. */
	retval = get_eth_conf(&port_conf, num_devices);
	if (retval < 0)
		return retval;
	/* NIC queues are divided into pf queues and vmdq queues. */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_devices * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base = dev_info.vmdq_pool_base;
	printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
		num_pf_queues, num_devices, queues_per_pool);
	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	rx_rings = (uint16_t)dev_info.max_rx_queues;
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
			port, strerror(-retval));
		return retval;
	}
	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
		&tx_ring_size);
	if (retval != 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
			"for port %u: %s.\n", port, strerror(-retval));
		return retval;
	}
	if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
		RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
			"for Rx queues on port %u.\n", port);
		return -1;
	}
	/* Setup the queues. */
	rxconf->offloads = port_conf.rxmode.offloads;
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
						rte_eth_dev_socket_id(port),
						rxconf,
						mbuf_pool);
		if (retval < 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup rx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));
			return retval;
		}
	}
	txconf->offloads = port_conf.txmode.offloads;
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
						rte_eth_dev_socket_id(port),
						txconf);
		if (retval < 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup tx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));
			return retval;
		}
	}
	/* Start the device. */
	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
			port, strerror(-retval));
		return retval;
	}

	if (promiscuous) {
		retval = rte_eth_promiscuous_enable(port);
		if (retval != 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to enable promiscuous mode on port %u: %s\n",
				port, rte_strerror(-retval));
			return retval;
		}
	}
	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
		" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
		port,
		vmdq_ports_eth_addr[port].addr_bytes[0],
		vmdq_ports_eth_addr[port].addr_bytes[1],
		vmdq_ports_eth_addr[port].addr_bytes[2],
		vmdq_ports_eth_addr[port].addr_bytes[3],
		vmdq_ports_eth_addr[port].addr_bytes[4],
		vmdq_ports_eth_addr[port].addr_bytes[5]);

	return 0;
}
/*
 * Set socket file path.
 */
static int
us_vhost_parse_socket_path(const char *q_arg)
{
	char *old;

	/* parse socket path string */
	if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
		return -1;

	old = socket_files;
	socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
	if (socket_files == NULL) {
		free(old);
		return -1;
	}

	strlcpy(socket_files + nb_sockets * PATH_MAX, q_arg, PATH_MAX);
	nb_sockets++;

	return 0;
}
/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	errno = 0;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return 0;

	return pm;
}
/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
	char *end = NULL;
	unsigned long num;

	errno = 0;

	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	if (num > max_valid_value)
		return -1;

	return num;
}
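
/*
 * Example behaviour of the parser above: parse_num_opt("4", 32) returns 4,
 * while parse_num_opt("", 32), parse_num_opt("4x", 32) and
 * parse_num_opt("64", 32) all return -1.
 */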
/*
 * Display usage
 */
static void
us_vhost_usage(const char *prgname)
{
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
	"		--vm2vm [0|1|2]\n"
	"		--rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
	"		--socket-file <path>\n"
	"		-p PORTMASK: Set mask for ports to be used by application\n"
	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
	"		--rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destination queue is full\n"
	"		--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. Takes effect only if rx retries are enabled\n"
	"		--rx-retry-num [0-N]: the number of retries on rx. Takes effect only if rx retries are enabled\n"
	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
	"		--socket-file: The path of the socket file.\n"
	"		--tx-csum [0|1]: disable/enable TX checksum offload.\n"
	"		--tso [0|1]: disable/enable TCP segmentation offload.\n"
	"		--client: register a vhost-user socket as client mode.\n"
	"		--dequeue-zero-copy: enable dequeue zero copy\n",
	       prgname);
}
/*
 * Parse the arguments given in the command line of the application.
 */
static int
us_vhost_parse_args(int argc, char **argv)
{
	int opt, ret;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"vm2vm", required_argument, NULL, 0},
		{"rx-retry", required_argument, NULL, 0},
		{"rx-retry-delay", required_argument, NULL, 0},
		{"rx-retry-num", required_argument, NULL, 0},
		{"mergeable", required_argument, NULL, 0},
		{"stats", required_argument, NULL, 0},
		{"socket-file", required_argument, NULL, 0},
		{"tx-csum", required_argument, NULL, 0},
		{"tso", required_argument, NULL, 0},
		{"client", no_argument, &client_mode, 1},
		{"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
		{"builtin-net-driver", no_argument, &builtin_net_driver, 1},
		{NULL, 0, 0, 0},
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:P",
			long_option, &option_index)) != EOF) {
		switch (opt) {
		/* Portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
				us_vhost_usage(prgname);
				return -1;
			}
			break;

		case 'P':
			promiscuous = 1;
			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
				ETH_VMDQ_ACCEPT_BROADCAST |
				ETH_VMDQ_ACCEPT_MULTICAST;
			break;

		case 0:
			/* Enable/disable vm2vm comms. */
			if (!strncmp(long_option[option_index].name, "vm2vm",
				MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for "
						"vm2vm [0|1|2]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				vm2vm_mode = (vm2vm_type)ret;
			}
			/* Enable/disable retries on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					enable_retry = ret;
				}
			}
			/* Enable/disable TX checksum offload. */
			if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else
					enable_tx_csum = ret;
			}
			/* Enable/disable TSO offload. */
			if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else
					enable_tso = ret;
			}
			/* Specify the retries delay time (in microseconds) on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					burst_rx_delay_time = ret;
				}
			}
			/* Specify the retries number on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					burst_rx_retry_num = ret;
				}
			}
			/* Enable/disable RX mergeable buffers. */
			if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					mergeable = !!ret;
					if (ret) {
						vmdq_conf_default.rxmode.offloads |=
							DEV_RX_OFFLOAD_JUMBO_FRAME;
						vmdq_conf_default.rxmode.max_rx_pkt_len
							= JUMBO_FRAME_MAX_SIZE;
					}
				}
			}
			/* Enable/disable stats. */
			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for stats [0..N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					enable_stats = ret;
				}
			}
			/* Set socket file path. */
			if (!strncmp(long_option[option_index].name,
						"socket-file", MAX_LONG_OPT_SZ)) {
				if (us_vhost_parse_socket_path(optarg) == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for socket name (Max %d characters)\n",
					PATH_MAX);
					us_vhost_usage(prgname);
					return -1;
				}
			}

			break;

		/* Invalid option - print options. */
		default:
			us_vhost_usage(prgname);
			return -1;
		}
	}
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = i;
	}

	if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	return 0;
}
/*
 * Update the global var NUM_PORTS and array PORTS according to system ports number
 * and return valid ports number
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (!rte_eth_dev_is_valid_port(ports[portid])) {
			RTE_LOG(INFO, VHOST_PORT,
				"\nSpecified port ID(%u) is not valid\n",
				ports[portid]);
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}
static __rte_always_inline struct vhost_dev *
find_vhost_dev(struct rte_ether_addr *mac)
{
	struct vhost_dev *vdev;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->ready == DEVICE_RX &&
		    rte_is_same_ether_addr(mac, &vdev->mac_address))
			return vdev;
	}

	return NULL;
}
/*
 * This function learns the MAC address of the device and registers this along with a
 * vlan tag to a VMDQ.
 */
static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct rte_ether_hdr *pkt_hdr;
	int i, ret;

	/* Learn MAC address of guest device from packet */
	pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	if (find_vhost_dev(&pkt_hdr->s_addr)) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) device is using a registered MAC!\n",
			vdev->vid);
		return -1;
	}

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];

	/* vlan_tag currently uses the device_id. */
	vdev->vlan_tag = vlan_tags[vdev->vid];

	/* Print out VMDQ registration info. */
	RTE_LOG(INFO, VHOST_DATA,
		"(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
		vdev->vid,
		vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
		vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
		vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
		vdev->vlan_tag);

	/* Register the MAC address. */
	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
				(uint32_t)vdev->vid + vmdq_pool_base);
	if (ret)
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) failed to add device MAC address to VMDQ\n",
			vdev->vid);

	rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);

	/* Set device as ready for RX. */
	vdev->ready = DEVICE_RX;

	return 0;
}
/*
 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
 * queue before disabling RX on the device.
 */
static inline void
unlink_vmdq(struct vhost_dev *vdev)
{
	unsigned i = 0;
	unsigned rx_count;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	if (vdev->ready == DEVICE_RX) {
		/* clear MAC and VLAN settings */
		rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
		for (i = 0; i < 6; i++)
			vdev->mac_address.addr_bytes[i] = 0;

		vdev->vlan_tag = 0;

		/* Clear out the receive buffers */
		rx_count = rte_eth_rx_burst(ports[0],
			(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

		while (rx_count) {
			for (i = 0; i < rx_count; i++)
				rte_pktmbuf_free(pkts_burst[i]);

			rx_count = rte_eth_rx_burst(ports[0],
				(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
		}

		vdev->ready = DEVICE_MAC_LEARNING;
	}
}
static __rte_always_inline void
virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
	    struct rte_mbuf *m)
{
	uint16_t ret;

	if (builtin_net_driver) {
		ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
	} else {
		ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
	}

	if (enable_stats) {
		rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
		rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
		src_vdev->stats.tx_total++;
		src_vdev->stats.tx += ret;
	}
}
/*
 * Check if the packet destination MAC address is for a local device. If so then put
 * the packet on that device's RX queue. If not then return.
 */
static __rte_always_inline int
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct rte_ether_hdr *pkt_hdr;
	struct vhost_dev *dst_vdev;

	pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
	if (!dst_vdev)
		return -1;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
			vdev->vid);
		return 0;
	}

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is local\n", dst_vdev->vid);

	if (unlikely(dst_vdev->remove)) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) device is marked for removal\n", dst_vdev->vid);
		return 0;
	}

	virtio_xmit(dst_vdev, vdev, m);
	return 0;
}
/*
 * Check if the destination MAC of a packet is one local VM,
 * and get its vlan tag, and offset if it is.
 */
static __rte_always_inline int
find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
	uint32_t *offset, uint16_t *vlan_tag)
{
	struct vhost_dev *dst_vdev;
	struct rte_ether_hdr *pkt_hdr =
		rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
	if (!dst_vdev)
		return 0;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
			vdev->vid);
		return -1;
	}

	/*
	 * HW vlan strip will reduce the packet length by the length of
	 * the vlan tag, so the packet length needs to be restored by
	 * adding it back.
	 */
	*offset = VLAN_HLEN;
	*vlan_tag = vlan_tags[vdev->vid];

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
		vdev->vid, dst_vdev->vid, *vlan_tag);

	return 0;
}
static uint16_t
get_psd_sum(void *l3_hdr, uint64_t ol_flags)
{
	if (ol_flags & PKT_TX_IPV4)
		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
	else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}
static void virtio_tx_offload(struct rte_mbuf *m)
{
	void *l3_hdr;
	struct rte_ipv4_hdr *ipv4_hdr = NULL;
	struct rte_tcp_hdr *tcp_hdr = NULL;
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	l3_hdr = (char *)eth_hdr + m->l2_len;

	if (m->ol_flags & PKT_TX_IPV4) {
		ipv4_hdr = l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		m->ol_flags |= PKT_TX_IP_CKSUM;
	}

	tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + m->l3_len);
	tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}
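
/*
 * Note on the pseudo-header sum above: with TCP segmentation offloaded
 * (PKT_TX_TCP_SEG), the NIC expects the TCP checksum field to be seeded
 * with the pseudo-header checksum (addresses, protocol, TCP length) only;
 * the hardware then computes the full checksum for each segment it emits.
 */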
static inline void
free_pkts(struct rte_mbuf **pkts, uint16_t n)
{
	while (n--)
		rte_pktmbuf_free(pkts[n]);
}
static __rte_always_inline void
do_drain_mbuf_table(struct mbuf_table *tx_q)
{
	uint16_t count;

	count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
				 tx_q->m_table, tx_q->len);
	if (unlikely(count < tx_q->len))
		free_pkts(&tx_q->m_table[count], tx_q->len - count);

	tx_q->len = 0;
}
/*
 * This function routes the TX packet to the correct interface. This
 * may be a local device or the physical port.
 */
static __rte_always_inline void
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
	struct mbuf_table *tx_q;
	unsigned offset = 0;
	const uint16_t lcore_id = rte_lcore_id();
	struct rte_ether_hdr *nh;

	nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	if (unlikely(rte_is_broadcast_ether_addr(&nh->d_addr))) {
		struct vhost_dev *vdev2;

		TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
			if (vdev2 != vdev)
				virtio_xmit(vdev2, vdev, m);
		}
		goto queue2nic;
	}

	/* check if destination is local VM */
	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
		rte_pktmbuf_free(m);
		return;
	}

	if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
		if (unlikely(find_local_dest(vdev, m, &offset,
					     &vlan_tag) != 0)) {
			rte_pktmbuf_free(m);
			return;
		}
	}

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is external\n", vdev->vid);

queue2nic:

	/* Add packet to the port tx queue */
	tx_q = &lcore_tx_queue[lcore_id];

	nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	if (unlikely(nh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))) {
		/* Guest has inserted the vlan tag. */
		struct rte_vlan_hdr *vh = (struct rte_vlan_hdr *) (nh + 1);
		uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
		if ((vm2vm_mode == VM2VM_HARDWARE) &&
			(vh->vlan_tci != vlan_tag_be))
			vh->vlan_tci = vlan_tag_be;
	} else {
		m->ol_flags |= PKT_TX_VLAN_PKT;

		/*
		 * Find the right seg to adjust the data len when offset is
		 * bigger than tail room size.
		 */
		if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
			if (likely(offset <= rte_pktmbuf_tailroom(m)))
				m->data_len += offset;
			else {
				struct rte_mbuf *seg = m;

				while ((seg->next != NULL) &&
					(offset > rte_pktmbuf_tailroom(seg)))
					seg = seg->next;

				seg->data_len += offset;
			}
			m->pkt_len += offset;
		}

		m->vlan_tci = vlan_tag;
	}

	if (m->ol_flags & PKT_TX_TCP_SEG)
		virtio_tx_offload(m);

	tx_q->m_table[tx_q->len++] = m;
	if (enable_stats) {
		vdev->stats.tx_total++;
		vdev->stats.tx++;
	}

	if (unlikely(tx_q->len == MAX_PKT_BURST))
		do_drain_mbuf_table(tx_q);
}
static __rte_always_inline void
drain_mbuf_table(struct mbuf_table *tx_q)
{
	static uint64_t prev_tsc;
	uint64_t cur_tsc;

	if (tx_q->len == 0)
		return;

	cur_tsc = rte_rdtsc();
	if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
		prev_tsc = cur_tsc;

		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"TX queue drained after timeout with burst size %u\n",
			tx_q->len);
		do_drain_mbuf_table(tx_q);
	}
}
static __rte_always_inline void
drain_eth_rx(struct vhost_dev *vdev)
{
	uint16_t rx_count, enqueue_count;
	struct rte_mbuf *pkts[MAX_PKT_BURST];

	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
				    pkts, MAX_PKT_BURST);
	if (!rx_count)
		return;

	/*
	 * When "enable_retry" is set, here we wait and retry when there
	 * are not enough free slots in the queue to hold @rx_count
	 * packets, to diminish packet loss.
	 */
	if (enable_retry &&
	    unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
			VIRTIO_RXQ))) {
		uint32_t retry;

		for (retry = 0; retry < burst_rx_retry_num; retry++) {
			rte_delay_us(burst_rx_delay_time);
			if (rx_count <= rte_vhost_avail_entries(vdev->vid,
					VIRTIO_RXQ))
				break;
		}
	}

	if (builtin_net_driver) {
		enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
						pkts, rx_count);
	} else {
		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
						pkts, rx_count);
	}
	if (enable_stats) {
		rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
		rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
	}

	free_pkts(pkts, rx_count);
}
static __rte_always_inline void
drain_virtio_tx(struct vhost_dev *vdev)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t count;
	uint16_t i;

	if (builtin_net_driver) {
		count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
					pkts, MAX_PKT_BURST);
	} else {
		count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
					mbuf_pool, pkts, MAX_PKT_BURST);
	}

	/* setup VMDq for the first packet */
	if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
		if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
			free_pkts(pkts, count);
	}

	for (i = 0; i < count; ++i)
		virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}
/*
 * Main function of vhost-switch. It basically does:
 *
 * for each vhost device {
 *    - drain_eth_rx()
 *
 *      Which drains the host eth Rx queue linked to the vhost device,
 *      and deliver all of them to guest virtio Rx ring associated with
 *      this vhost device.
 *
 *    - drain_virtio_tx()
 *
 *      Which drains the guest virtio Tx queue and deliver all of them
 *      to the target, which could be another vhost device, or the
 *      physical eth dev. The route is done in function "virtio_tx_route".
 * }
 */
static int
switch_worker(void *arg __rte_unused)
{
	unsigned i;
	unsigned lcore_id = rte_lcore_id();
	struct vhost_dev *vdev;
	struct mbuf_table *tx_q;

	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);

	tx_q = &lcore_tx_queue[lcore_id];
	for (i = 0; i < rte_lcore_count(); i++) {
		if (lcore_ids[i] == lcore_id) {
			tx_q->txq_id = i;
			break;
		}
	}

	while (1) {
		drain_mbuf_table(tx_q);

		/*
		 * Inform the configuration core that we have exited the
		 * linked list and that no devices are in use if requested.
		 */
		if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
			lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;

		/*
		 * Process vhost devices
		 */
		TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
			      lcore_vdev_entry) {
			if (unlikely(vdev->remove)) {
				unlink_vmdq(vdev);
				vdev->ready = DEVICE_SAFE_REMOVE;
				continue;
			}

			if (likely(vdev->ready == DEVICE_RX))
				drain_eth_rx(vdev);

			if (likely(!vdev->remove))
				drain_virtio_tx(vdev);
		}
	}

	return 0;
}
/*
 * Remove a device from the specific data core linked list and from the
 * main linked list. Synchronization occurs through the use of the
 * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
 * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
 */
static void
destroy_device(int vid)
{
	struct vhost_dev *vdev = NULL;
	int lcore;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->vid == vid)
			break;
	}
	if (!vdev)
		return;
	/* set the remove flag. */
	vdev->remove = 1;
	while (vdev->ready != DEVICE_SAFE_REMOVE) {
		rte_pause();
	}

	if (builtin_net_driver)
		vs_vhost_net_remove(vdev);

	TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
		     lcore_vdev_entry);
	TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);

	/* Set the dev_removal_flag on each lcore. */
	RTE_LCORE_FOREACH_SLAVE(lcore)
		lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;

	/*
	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
	 * we can be sure that they can no longer access the device removed
	 * from the linked lists and that the devices are no longer in use.
	 */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
			rte_pause();
	}

	lcore_info[vdev->coreid].device_num--;

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been removed from data core\n",
		vdev->vid);

	rte_free(vdev);
}
/*
 * A new device is added to a data core. First the device is added to the main linked list
 * and then allocated to a specific data core.
 */
static int
new_device(int vid)
{
	int lcore, core_add = 0;
	uint32_t device_num_min = num_devices;
	struct vhost_dev *vdev;

	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
	if (vdev == NULL) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%d) couldn't allocate memory for vhost dev\n",
			vid);
		return -1;
	}
	vdev->vid = vid;

	if (builtin_net_driver)
		vs_vhost_net_setup(vdev);

	TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
	vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;

	/* reset ready flag */
	vdev->ready = DEVICE_MAC_LEARNING;
	vdev->remove = 0;

	/* Find a suitable lcore to add the device. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		if (lcore_info[lcore].device_num < device_num_min) {
			device_num_min = lcore_info[lcore].device_num;
			core_add = lcore;
		}
	}
	vdev->coreid = core_add;

	TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
			  lcore_vdev_entry);
	lcore_info[vdev->coreid].device_num++;

	/* Disable notifications. */
	rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
	rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been added to data core %d\n",
		vid, vdev->coreid);

	return 0;
}
/*
 * These callbacks allow devices to be added to the data core when configuration
 * is fully complete.
 */
static const struct vhost_device_ops virtio_net_device_ops =
{
	.new_device =  new_device,
	.destroy_device = destroy_device,
};
/*
 * This is a thread that wakes up periodically to print stats if the user
 * has enabled them.
 */
static void *
print_stats(__rte_unused void *arg)
{
	struct vhost_dev *vdev;
	uint64_t tx_dropped, rx_dropped;
	uint64_t tx, tx_total, rx, rx_total;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	while (1) {
		sleep(enable_stats);

		/* Clear screen and move to top left */
		printf("%s%s\n", clr, top_left);
		printf("Device statistics =================================\n");

		TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
			tx_total   = vdev->stats.tx_total;
			tx         = vdev->stats.tx;
			tx_dropped = tx_total - tx;

			rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
			rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
			rx_dropped = rx_total - rx;

			printf("Statistics for device %d\n"
				"-----------------------\n"
				"TX total:              %" PRIu64 "\n"
				"TX dropped:            %" PRIu64 "\n"
				"TX successful:         %" PRIu64 "\n"
				"RX total:              %" PRIu64 "\n"
				"RX dropped:            %" PRIu64 "\n"
				"RX successful:         %" PRIu64 "\n",
				vdev->vid,
				tx_total, tx_dropped, tx,
				rx_total, rx_dropped, rx);
		}

		printf("===================================================\n");
	}

	return NULL;
}
static void
unregister_drivers(int socket_num)
{
	int i, ret;

	for (i = 0; i < socket_num; i++) {
		ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
		if (ret != 0)
			RTE_LOG(ERR, VHOST_CONFIG,
				"Failed to unregister vhost driver for %s.\n",
				socket_files + i * PATH_MAX);
	}
}
/* When we receive an INT signal, unregister the vhost driver */
static void
sigint_handler(__rte_unused int signum)
{
	/* Unregister vhost driver. */
	unregister_drivers(nb_sockets);

	exit(0);
}
/*
 * While creating an mbuf pool, one key thing is to figure out how
 * many mbuf entries are enough for our use. FYI, here are some
 * guidelines:
 *
 * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
 *
 * - For each switch core (a CPU core that does the packet switching), we
 *   also need to make some reservation for receiving the packets from the
 *   virtio Tx queue. How many is enough depends on the usage. It's normally
 *   a simple calculation like the following:
 *
 *       MAX_PKT_BURST * max packet size / mbuf size
 *
 *   So, we definitely need to allocate more mbufs when TSO is enabled.
 *
 * - Similarly, for each switching core, we should serve @nr_rx_desc
 *   mbufs for receiving the packets from the physical NIC device.
 *
 * - We also need to make sure, for each switch core, we have allocated
 *   enough mbufs to fill up the mbuf cache.
 */
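
/*
 * A rough worked example (illustrative numbers only): with
 * mbuf_size = RTE_MBUF_DEFAULT_BUF_SIZE (2176 bytes), mtu = 1500,
 * MAX_PKT_BURST = 32 and nr_rx_desc = 1024, each switch core needs about
 * (1500 + 2176) * 32 / (2176 - 128) + 1024 ~= 1081 mbufs, on top of the
 * nr_queues * nr_rx_desc reserved for the RX queues themselves.
 */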
static void
create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
	uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
{
	uint32_t nr_mbufs;
	uint32_t nr_mbufs_per_core;
	uint32_t mtu = 1500;

	if (enable_tso)
		mtu = 64 * 1024;

	nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
			(mbuf_size - RTE_PKTMBUF_HEADROOM);
	nr_mbufs_per_core += nr_rx_desc;
	nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);

	nr_mbufs  = nr_queues * nr_rx_desc;
	nr_mbufs += nr_mbufs_per_core * nr_switch_core;
	nr_mbufs *= nr_port;

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
					    nr_mbuf_cache, 0, mbuf_size,
					    rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
}
/*
 * Main function, does initialisation and calls the per-lcore functions.
 */
int
main(int argc, char *argv[])
{
	unsigned lcore_id, core_id = 0;
	unsigned nb_ports, valid_num_ports;
	int ret, i;
	uint16_t portid;
	static pthread_t tid;
	uint64_t flags = 0;

	signal(SIGINT, sigint_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = us_vhost_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid argument\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		TAILQ_INIT(&lcore_info[lcore_id].vdev_list);

		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;
	}

	if (rte_lcore_count() > RTE_MAX_LCORE)
		rte_exit(EXIT_FAILURE, "Not enough cores\n");

	/* Get the number of physical ports. */
	nb_ports = rte_eth_dev_count_avail();

	/*
	 * Update the global var NUM_PORTS and global array PORTS
	 * and get value of var VALID_NUM_PORTS according to system ports number
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	/*
	 * FIXME: here we are trying to allocate mbufs big enough for
	 * @MAX_QUEUES, but the truth is we're never going to use that
	 * many queues here. We probably should only do allocation for
	 * those queues we are going to use.
	 */
	create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
			 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
	if (vm2vm_mode == VM2VM_HARDWARE) {
		/* Enable VT loopback to let the NIC's L2 switch do it. */
		vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
		RTE_LOG(DEBUG, VHOST_CONFIG,
			"Enable loop back for L2 switch in vmdq.\n");
	}

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			RTE_LOG(INFO, VHOST_PORT,
				"Skipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid) != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot initialize network ports\n");
	}
	/* Enable stats if the user option is set. */
	if (enable_stats) {
		ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
					print_stats, NULL);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot create print-stats thread\n");
	}

	/* Launch all data cores. */
	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_remote_launch(switch_worker, NULL, lcore_id);
	if (client_mode)
		flags |= RTE_VHOST_USER_CLIENT;

	if (dequeue_zero_copy)
		flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
	/* Register vhost user driver to handle vhost messages. */
	for (i = 0; i < nb_sockets; i++) {
		char *file = socket_files + i * PATH_MAX;
		ret = rte_vhost_driver_register(file, flags);
		if (ret != 0) {
			unregister_drivers(i);
			rte_exit(EXIT_FAILURE,
				"vhost driver register failure.\n");
		}

		if (builtin_net_driver)
			rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);

		if (mergeable == 0) {
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_MRG_RXBUF);
		}

		if (enable_tx_csum == 0) {
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_CSUM);
		}

		if (enable_tso == 0) {
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_HOST_TSO4);
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_HOST_TSO6);
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_GUEST_TSO4);
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_GUEST_TSO6);
		}

		if (promiscuous) {
			rte_vhost_driver_enable_features(file,
				1ULL << VIRTIO_NET_F_CTRL_RX);
		}

		ret = rte_vhost_driver_callback_register(file,
			&virtio_net_device_ops);
		if (ret != 0) {
			rte_exit(EXIT_FAILURE,
				"failed to register vhost driver callbacks.\n");
		}

		if (rte_vhost_driver_start(file) < 0) {
			rte_exit(EXIT_FAILURE,
				"failed to start vhost driver.\n");
		}
	}
	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_wait_lcore(lcore_id);

	return 0;
}