/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>

#include <sys/eventfd.h>
#include <sys/param.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_vhost.h>
#include <rte_pause.h>
#define MAX_QUEUES 128

/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

#define MBUF_CACHE_SIZE 128
#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE

#define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */

#define BURST_RX_WAIT_US 15	/* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4	/* Number of retries on RX. */
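/* 0x2600 = 9728 bytes: room for a 9000-byte jumbo MTU plus link headers. */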
#define JUMBO_FRAME_MAX_SIZE 0x2600

/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
#define DEVICE_RX 1
#define DEVICE_SAFE_REMOVE 2

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64

/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Promiscuous mode */
static uint32_t promiscuous;

/* number of devices/queues to support */
static uint32_t num_queues = 0;
static uint32_t num_devices;

static struct rte_mempool *mbuf_pool;
static int mergeable;
/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
typedef enum {
	VM2VM_DISABLED = 0,
	VM2VM_SOFTWARE = 1,
	VM2VM_HARDWARE = 2,
	VM2VM_LAST
} vm2vm_type;
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
/* Enable stats. */
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;

/* Disable TX checksum offload */
static uint32_t enable_tx_csum;

/* Disable TSO offload */
static uint32_t enable_tso;

static int client_mode;
static int dequeue_zero_copy;

static int builtin_net_driver;

/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;

/* Socket file paths. Can be set by user */
static char *socket_files;
static int nb_sockets;
/* Empty vmdq configuration structure. Filled in programmatically */
static struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
		.split_hdr_size = 0,
		/*
		 * VLAN strip is necessary for 1G NICs such as I350;
		 * it fixes a bug where IPv4 forwarding in the guest cannot
		 * forward packets from one virtio dev to another virtio dev.
		 */
		.offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
	},

	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
			     DEV_TX_OFFLOAD_TCP_CKSUM |
			     DEV_TX_OFFLOAD_VLAN_INSERT |
			     DEV_TX_OFFLOAD_MULTI_SEGS |
			     DEV_TX_OFFLOAD_TCP_TSO),
	},
	.rx_adv_conf = {
		/*
		 * should be overridden separately in code with
		 * appropriate values
		 */
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
};
static unsigned lcore_ids[RTE_MAX_LCORE];
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified in command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;
const uint16_t vlan_tags[] = {
	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
	1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};
/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

static struct vhost_dev_tailq_list vhost_dev_list =
	TAILQ_HEAD_INITIALIZER(vhost_dev_list);

static struct lcore_info lcore_info[RTE_MAX_LCORE];
/* Used for queueing bursts of TX packets. */
struct mbuf_table {
	unsigned len;
	unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
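/*
 * TSC cycles per microsecond (rounded up) times the drain interval:
 * the number of TSC ticks after which a partially filled TX burst
 * buffer is flushed even if it never reaches MAX_PKT_BURST.
 */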
#define MBUF_TABLE_DRAIN_TSC	((rte_get_tsc_hz() + US_PER_S - 1) \
				 / US_PER_S * BURST_TX_DRAIN_US)
/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
	struct rte_eth_vmdq_rx_conf conf;
	struct rte_eth_vmdq_rx_conf *def_conf =
		&vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
	unsigned i;
	memset(&conf, 0, sizeof(conf));
	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
	conf.nb_pool_maps = num_devices;
	conf.enable_loop_back = def_conf->enable_loop_back;
	conf.rx_mode = def_conf->rx_mode;
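	/* Give each pool its own VLAN: traffic tagged vlan_tags[i] is
	 * steered to pool i, i.e. to virtio device i. */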
	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << i);
	}

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));

	return 0;
}
/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter.
 */
static inline int
port_init(uint16_t port)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_txconf *txconf;
	int16_t rx_rings, tx_rings;
	uint16_t rx_ring_size, tx_ring_size;
	int retval;
	uint16_t q;

	/*
	 * The max pool number from dev_info will be used to validate the pool
	 * number specified in the command line.
	 */
	rte_eth_dev_info_get(port, &dev_info);

	rxconf = &dev_info.default_rxconf;
	txconf = &dev_info.default_txconf;
	rxconf->rx_drop_en = 1;

	/* Configure the number of supported virtio devices based on VMDQ limits */
	num_devices = dev_info.max_vmdq_pools;
	rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
	tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;

	/*
	 * When dequeue zero copy is enabled, the guest Tx used vring will be
	 * updated only when the corresponding mbuf is freed. Thus, the
	 * nb_tx_desc (tx_ring_size here) must be small enough so that the
	 * driver will hit the free threshold easily and free mbufs in a
	 * timely manner. Otherwise, the guest Tx vring would be starved.
	 */
	if (dequeue_zero_copy)
		tx_ring_size = 64;

	tx_rings = (uint16_t)rte_lcore_count();
	/* Get port configuration. */
	retval = get_eth_conf(&port_conf, num_devices);
	if (retval != 0)
		return retval;
	/* NIC queues are divided into pf queues and vmdq queues. */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_devices * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base = dev_info.vmdq_pool_base;
	printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
		num_pf_queues, num_devices, queues_per_pool);
	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	rx_rings = (uint16_t)dev_info.max_rx_queues;
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
			port, strerror(-retval));
		return retval;
	}
	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
		&tx_ring_size);
	if (retval != 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
			"for port %u: %s.\n", port, strerror(-retval));
		return retval;
	}
	if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
		RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
			"for Rx queues on port %u.\n", port);
		return -1;
	}
	/* Setup the queues. */
	rxconf->offloads = port_conf.rxmode.offloads;
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
						rte_eth_dev_socket_id(port),
						rxconf,
						mbuf_pool);
		if (retval < 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup rx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));
			return retval;
		}
	}
	txconf->offloads = port_conf.txmode.offloads;
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
						rte_eth_dev_socket_id(port),
						txconf);
		if (retval < 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup tx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));
			return retval;
		}
	}
	/* Start the device. */
	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
			port, strerror(-retval));
		return retval;
	}

	if (promiscuous)
		rte_eth_promiscuous_enable(port);

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
		" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
		port,
		vmdq_ports_eth_addr[port].addr_bytes[0],
		vmdq_ports_eth_addr[port].addr_bytes[1],
		vmdq_ports_eth_addr[port].addr_bytes[2],
		vmdq_ports_eth_addr[port].addr_bytes[3],
		vmdq_ports_eth_addr[port].addr_bytes[4],
		vmdq_ports_eth_addr[port].addr_bytes[5]);

	return 0;
}
/*
 * Set socket file path.
 */
static int
us_vhost_parse_socket_path(const char *q_arg)
{
	/* reject over-long paths */
	if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
		return -1;
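	/* Socket paths are stored back to back in one flat buffer,
	 * one PATH_MAX-sized slot per socket. */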
	socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
	snprintf(socket_files + nb_sockets * PATH_MAX, PATH_MAX, "%s", q_arg);
	nb_sockets++;

	return 0;
}
/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	errno = 0;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	return pm;
}
/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
	char *end = NULL;
	unsigned long num;

	errno = 0;

	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	if (num > max_valid_value)
		return -1;

	return num;
}
/*
 * Display usage
 */
static void
us_vhost_usage(const char *prgname)
{
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
	"		--rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
	"		--socket-file <path>\n"
	"		-p PORTMASK: Set mask for ports to be used by application\n"
	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
	"		--rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destination queue is full\n"
	"		--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. Takes effect only if retries on rx are enabled\n"
	"		--rx-retry-num [0-N]: the number of retries on rx. Takes effect only if retries on rx are enabled\n"
	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
	"		--socket-file: The path of the socket file.\n"
	"		--tx-csum [0|1]: disable/enable TX checksum offload.\n"
	"		--tso [0|1]: disable/enable TCP segmentation offload.\n"
	"		--client: register a vhost-user socket as client mode.\n"
	"		--dequeue-zero-copy: enable dequeue zero copy\n",
		prgname);
}
/*
 * Parse the arguments given in the command line of the application.
 */
static int
us_vhost_parse_args(int argc, char **argv)
{
	int opt, ret;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"vm2vm", required_argument, NULL, 0},
		{"rx-retry", required_argument, NULL, 0},
		{"rx-retry-delay", required_argument, NULL, 0},
		{"rx-retry-num", required_argument, NULL, 0},
		{"mergeable", required_argument, NULL, 0},
		{"stats", required_argument, NULL, 0},
		{"socket-file", required_argument, NULL, 0},
		{"tx-csum", required_argument, NULL, 0},
		{"tso", required_argument, NULL, 0},
		{"client", no_argument, &client_mode, 1},
		{"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
		{"builtin-net-driver", no_argument, &builtin_net_driver, 1},
		{NULL, 0, 0, 0},
	};
	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:P",
			long_option, &option_index)) != EOF) {
		switch (opt) {
		/* Portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
				us_vhost_usage(prgname);
				return -1;
			}
			break;

		case 'P':
			promiscuous = 1;
			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
				ETH_VMDQ_ACCEPT_BROADCAST |
				ETH_VMDQ_ACCEPT_MULTICAST;
			break;

		case 0:
			/* Enable/disable vm2vm comms. */
			if (!strncmp(long_option[option_index].name, "vm2vm",
				MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for "
						"vm2vm [0|1|2]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				vm2vm_mode = (vm2vm_type)ret;
			}
			/* Enable/disable retries on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					enable_retry = ret;
				}
			}
			/* Enable/disable TX checksum offload. */
			if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else
					enable_tx_csum = ret;
			}
			/* Enable/disable TSO offload. */
			if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else
					enable_tso = ret;
			}
			/* Specify the retry delay time (in microseconds) on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					burst_rx_delay_time = ret;
				}
			}
			/* Specify the number of retries on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					burst_rx_retry_num = ret;
				}
			}
			/* Enable/disable RX mergeable buffers. */
			if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					mergeable = !!ret;
					if (ret) {
						vmdq_conf_default.rxmode.offloads |=
							DEV_RX_OFFLOAD_JUMBO_FRAME;
						vmdq_conf_default.rxmode.max_rx_pkt_len
							= JUMBO_FRAME_MAX_SIZE;
					}
				}
			}
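			/*
			 * Note: mergeable Rx buffers let the guest chain
			 * multiple descriptors for one packet, so the NIC
			 * max frame size is raised above to let such large
			 * frames through.
			 */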
			/* Enable/disable stats. */
			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for stats [0..N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					enable_stats = ret;
				}
			}
			/* Set socket file path. */
			if (!strncmp(long_option[option_index].name,
				"socket-file", MAX_LONG_OPT_SZ)) {
				if (us_vhost_parse_socket_path(optarg) == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for socket name (Max %d characters)\n",
						PATH_MAX);
					us_vhost_usage(prgname);
					return -1;
				}
			}

			break;
		/* Invalid option - print options. */
		default:
			us_vhost_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = i;
	}

	if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	return 0;
}
/*
 * Update the global var NUM_PORTS and array PORTS according to the system
 * port number, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (!rte_eth_dev_is_valid_port(ports[portid])) {
			RTE_LOG(INFO, VHOST_PORT,
				"\nSpecified port ID(%u) is not valid\n",
				ports[portid]);
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}
static __rte_always_inline struct vhost_dev *
find_vhost_dev(struct ether_addr *mac)
{
	struct vhost_dev *vdev;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->ready == DEVICE_RX &&
		    is_same_ether_addr(mac, &vdev->mac_address))
			return vdev;
	}

	return NULL;
}
/*
 * This function learns the MAC address of the device and registers this along
 * with a vlan tag to a VMDQ.
 */
static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct ether_hdr *pkt_hdr;
	int i, ret;

	/* Learn MAC address of guest device from packet */
	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	if (find_vhost_dev(&pkt_hdr->s_addr)) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) device is using a registered MAC!\n",
			vdev->vid);
		return -1;
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];

	/* vlan_tag currently uses the device_id. */
	vdev->vlan_tag = vlan_tags[vdev->vid];
	/* Print out VMDQ registration info. */
	RTE_LOG(INFO, VHOST_DATA,
		"(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
		vdev->vid,
		vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
		vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
		vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
		vdev->vlan_tag);

	/* Register the MAC address. */
	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
				(uint32_t)vdev->vid + vmdq_pool_base);
	if (ret)
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) failed to add device MAC address to VMDQ\n",
			vdev->vid);

	rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);

	/* Set device as ready for RX. */
	vdev->ready = DEVICE_RX;

	return 0;
}
/*
 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding
 * buffers to the RX queue before disabling RX on the device.
 */
static inline void
unlink_vmdq(struct vhost_dev *vdev)
{
	unsigned i = 0;
	unsigned rx_count;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	if (vdev->ready == DEVICE_RX) {
		/* clear MAC and VLAN settings */
		rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
		for (i = 0; i < 6; i++)
			vdev->mac_address.addr_bytes[i] = 0;

		vdev->vlan_tag = 0;

		/* Clear out the receive buffers */
		rx_count = rte_eth_rx_burst(ports[0],
			(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

		while (rx_count) {
			for (i = 0; i < rx_count; i++)
				rte_pktmbuf_free(pkts_burst[i]);

			rx_count = rte_eth_rx_burst(ports[0],
				(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
		}

		vdev->ready = DEVICE_MAC_LEARNING;
	}
}
static __rte_always_inline void
virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
	    struct rte_mbuf *m)
{
	uint16_t ret;

	if (builtin_net_driver) {
		ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
	} else {
		ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
	}

	if (enable_stats) {
		rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
		rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
		src_vdev->stats.tx_total++;
		src_vdev->stats.tx += ret;
	}
}
/*
 * Check if the packet destination MAC address is for a local device. If so
 * then put the packet on that device's RX queue. If not then return.
 */
static __rte_always_inline int
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct ether_hdr *pkt_hdr;
	struct vhost_dev *dst_vdev;

	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
	if (!dst_vdev)
		return -1;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
			vdev->vid);
		return 0;
	}

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is local\n", dst_vdev->vid);

	if (unlikely(dst_vdev->remove)) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) device is marked for removal\n", dst_vdev->vid);
		return 0;
	}

	virtio_xmit(dst_vdev, vdev, m);
	return 0;
}
/*
 * Check if the destination MAC of a packet is one local VM,
 * and if so, get its vlan tag and the needed length offset.
 */
static __rte_always_inline int
find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
	uint32_t *offset, uint16_t *vlan_tag)
{
	struct vhost_dev *dst_vdev;
	struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
	if (!dst_vdev)
		return 0;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
			vdev->vid);
		return -1;
	}

	/*
	 * HW vlan strip will reduce the packet length by the length of
	 * the vlan tag, so we need to restore the packet length by
	 * adding it back.
	 */
	*offset = VLAN_HLEN;
	*vlan_tag = vlan_tags[vdev->vid];

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
		vdev->vid, dst_vdev->vid, *vlan_tag);

	return 0;
}
static uint16_t
get_psd_sum(void *l3_hdr, uint64_t ol_flags)
{
	if (ol_flags & PKT_TX_IPV4)
		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}
static void virtio_tx_offload(struct rte_mbuf *m)
{
	void *l3_hdr;
	struct ipv4_hdr *ipv4_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	l3_hdr = (char *)eth_hdr + m->l2_len;

	if (m->ol_flags & PKT_TX_IPV4) {
		ipv4_hdr = l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		m->ol_flags |= PKT_TX_IP_CKSUM;
	}
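	/*
	 * For TSO the hardware expects the TCP checksum field to be seeded
	 * with the pseudo-header checksum only; the NIC then computes the
	 * full checksum for each segment it produces.
	 */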
	tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
	tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}
static void
free_pkts(struct rte_mbuf **pkts, uint16_t n)
{
	while (n--)
		rte_pktmbuf_free(pkts[n]);
}

static __rte_always_inline void
do_drain_mbuf_table(struct mbuf_table *tx_q)
{
	uint16_t count;

	count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
				 tx_q->m_table, tx_q->len);
	if (unlikely(count < tx_q->len))
		free_pkts(&tx_q->m_table[count], tx_q->len - count);

	tx_q->len = 0;
}
/*
 * This function routes the TX packet to the correct interface. This
 * may be a local device or the physical port.
 */
static __rte_always_inline void
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
	struct mbuf_table *tx_q;
	unsigned offset = 0;
	const uint16_t lcore_id = rte_lcore_id();
	struct ether_hdr *nh;

	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
		struct vhost_dev *vdev2;

		TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
			if (vdev2 != vdev)
				virtio_xmit(vdev2, vdev, m);
		}
		goto queue2nic;
	}

	/* check if destination is local VM */
	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
		rte_pktmbuf_free(m);
		return;
	}

	if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
		if (unlikely(find_local_dest(vdev, m, &offset,
					     &vlan_tag) != 0)) {
			rte_pktmbuf_free(m);
			return;
		}
	}

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is external\n", vdev->vid);
queue2nic:

	/* Add packet to the port tx queue */
	tx_q = &lcore_tx_queue[lcore_id];

	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
		/* Guest has inserted the vlan tag. */
		struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
		uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
		if ((vm2vm_mode == VM2VM_HARDWARE) &&
			(vh->vlan_tci != vlan_tag_be))
			vh->vlan_tci = vlan_tag_be;
	} else {
		m->ol_flags |= PKT_TX_VLAN_PKT;
	}
	/*
	 * Find the right seg to adjust the data len when offset is
	 * bigger than tail room size.
	 */
	if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
		if (likely(offset <= rte_pktmbuf_tailroom(m)))
			m->data_len += offset;
		else {
			struct rte_mbuf *seg = m;

			while ((seg->next != NULL) &&
				(offset > rte_pktmbuf_tailroom(seg)))
				seg = seg->next;

			seg->data_len += offset;
		}
		m->pkt_len += offset;
	}

	m->vlan_tci = vlan_tag;

	if (m->ol_flags & PKT_TX_TCP_SEG)
		virtio_tx_offload(m);

	tx_q->m_table[tx_q->len++] = m;
	if (enable_stats) {
		vdev->stats.tx_total++;
		vdev->stats.tx++;
	}

	if (unlikely(tx_q->len == MAX_PKT_BURST))
		do_drain_mbuf_table(tx_q);
}
static __rte_always_inline void
drain_mbuf_table(struct mbuf_table *tx_q)
{
	static uint64_t prev_tsc;
	uint64_t cur_tsc;

	if (tx_q->len == 0)
		return;

	cur_tsc = rte_rdtsc();
	if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
		prev_tsc = cur_tsc;

		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"TX queue drained after timeout with burst size %u\n",
			tx_q->len);
		do_drain_mbuf_table(tx_q);
	}
}
static __rte_always_inline void
drain_eth_rx(struct vhost_dev *vdev)
{
	uint16_t rx_count, enqueue_count;
	struct rte_mbuf *pkts[MAX_PKT_BURST];

	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
				    pkts, MAX_PKT_BURST);
	if (!rx_count)
		return;

	/*
	 * When "enable_retry" is set, here we wait and retry when there
	 * are not enough free slots in the queue to hold @rx_count packets,
	 * to diminish packet loss.
	 */
	if (enable_retry &&
	    unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
			VIRTIO_RXQ))) {
		uint32_t retry;

		for (retry = 0; retry < burst_rx_retry_num; retry++) {
			rte_delay_us(burst_rx_delay_time);
			if (rx_count <= rte_vhost_avail_entries(vdev->vid,
					VIRTIO_RXQ))
				break;
		}
	}

	if (builtin_net_driver) {
		enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
						pkts, rx_count);
	} else {
		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
						pkts, rx_count);
	}
	if (enable_stats) {
		rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
		rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
	}

	free_pkts(pkts, rx_count);
}
static __rte_always_inline void
drain_virtio_tx(struct vhost_dev *vdev)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t count;
	uint16_t i;

	if (builtin_net_driver) {
		count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
					pkts, MAX_PKT_BURST);
	} else {
		count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
					mbuf_pool, pkts, MAX_PKT_BURST);
	}

	/* setup VMDq for the first packet */
	if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
		if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
			free_pkts(pkts, count);
	}

	for (i = 0; i < count; ++i)
		virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}
/*
 * Main function of vhost-switch. It basically does:
 *
 * for each vhost device {
 *    - drain_eth_rx()
 *
 *      Which drains the host eth Rx queue linked to the vhost device,
 *      and delivers all of the packets to the guest virtio Rx ring
 *      associated with this vhost device.
 *
 *    - drain_virtio_tx()
 *
 *      Which drains the guest virtio Tx queue and delivers all of the
 *      packets to the target, which could be another vhost device, or
 *      the physical eth dev. The route is done in function
 *      "virtio_tx_route".
 * }
 */
static int
switch_worker(void *arg __rte_unused)
{
	unsigned i;
	unsigned lcore_id = rte_lcore_id();
	struct vhost_dev *vdev;
	struct mbuf_table *tx_q;

	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);

	tx_q = &lcore_tx_queue[lcore_id];
	for (i = 0; i < rte_lcore_count(); i++) {
		if (lcore_ids[i] == lcore_id) {
			tx_q->txq_id = i;
			break;
		}
	}
	while (1) {
		drain_mbuf_table(tx_q);

		/*
		 * Inform the configuration core that we have exited the
		 * linked list and that no devices are in use if requested.
		 */
		if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
			lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;

		/*
		 * Process vhost devices
		 */
		TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
			      lcore_vdev_entry) {
			if (unlikely(vdev->remove)) {
				unlink_vmdq(vdev);
				vdev->ready = DEVICE_SAFE_REMOVE;
				continue;
			}

			if (likely(vdev->ready == DEVICE_RX))
				drain_eth_rx(vdev);

			if (likely(!vdev->remove))
				drain_virtio_tx(vdev);
		}
	}

	return 0;
}
/*
 * Remove a device from the specific data core linked list and from the
 * main linked list. Synchronization occurs through the use of the
 * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
 * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
 */
static void
destroy_device(int vid)
{
	struct vhost_dev *vdev = NULL;
	int lcore;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->vid == vid)
			break;
	}
	if (!vdev)
		return;
	/* set the remove flag. */
	vdev->remove = 1;
	while (vdev->ready != DEVICE_SAFE_REMOVE) {
		rte_pause();
	}

	if (builtin_net_driver)
		vs_vhost_net_remove(vdev);

	TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
		     lcore_vdev_entry);
	TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);

	/* Set the dev_removal_flag on each lcore. */
	RTE_LCORE_FOREACH_SLAVE(lcore)
		lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;

	/*
	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
	 * we can be sure that they can no longer access the device removed
	 * from the linked lists and that the devices are no longer in use.
	 */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
			rte_pause();
	}

	lcore_info[vdev->coreid].device_num--;

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been removed from data core\n",
		vdev->vid);

	rte_free(vdev);
}
/*
 * A new device is added to a data core. First the device is added to the
 * main linked list and then allocated to a specific data core.
 */
static int
new_device(int vid)
{
	int lcore, core_add = 0;
	uint32_t device_num_min = num_devices;
	struct vhost_dev *vdev;

	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
	if (vdev == NULL) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%d) couldn't allocate memory for vhost dev\n",
			vid);
		return -1;
	}
	vdev->vid = vid;

	if (builtin_net_driver)
		vs_vhost_net_setup(vdev);

	TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
	vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
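	/*
	 * Each VMDq pool owns queues_per_pool contiguous hardware queues
	 * starting at vmdq_queue_base; device vid reads from the first
	 * queue of its pool.
	 */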
	/* reset ready flag */
	vdev->ready = DEVICE_MAC_LEARNING;
	vdev->remove = 0;
	/* Find a suitable lcore to add the device. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		if (lcore_info[lcore].device_num < device_num_min) {
			device_num_min = lcore_info[lcore].device_num;
			core_add = lcore;
		}
	}
	vdev->coreid = core_add;

	TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
			  lcore_vdev_entry);
	lcore_info[vdev->coreid].device_num++;

	/* Disable notifications: the switch cores busy-poll the vrings,
	 * so guest-to-host kicks would only add overhead. */
	rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
	rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been added to data core %d\n",
		vid, vdev->coreid);

	return 0;
}
/*
 * These callbacks allow devices to be added to the data core when
 * configuration has been fully completed.
 */
static const struct vhost_device_ops virtio_net_device_ops =
{
	.new_device =  new_device,
	.destroy_device = destroy_device,
};
/*
 * This is a thread that wakes up after a period to print stats if the user
 * has enabled them.
 */
static void *
print_stats(__rte_unused void *arg)
{
	struct vhost_dev *vdev;
	uint64_t tx_dropped, rx_dropped;
	uint64_t tx, tx_total, rx, rx_total;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	while (1) {
		sleep(enable_stats);

		/* Clear screen and move to top left */
		printf("%s%s\n", clr, top_left);
		printf("Device statistics =================================\n");
		TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
			tx_total   = vdev->stats.tx_total;
			tx         = vdev->stats.tx;
			tx_dropped = tx_total - tx;

			rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
			rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
			rx_dropped = rx_total - rx;

			printf("Statistics for device %d\n"
				"-----------------------\n"
				"TX total:       %" PRIu64 "\n"
				"TX dropped:     %" PRIu64 "\n"
				"TX successful:  %" PRIu64 "\n"
				"RX total:       %" PRIu64 "\n"
				"RX dropped:     %" PRIu64 "\n"
				"RX successful:  %" PRIu64 "\n",
				vdev->vid,
				tx_total, tx_dropped, tx,
				rx_total, rx_dropped, rx);
		}

		printf("===================================================\n");
	}

	return NULL;
}
static void
unregister_drivers(int socket_num)
{
	int i, ret;

	for (i = 0; i < socket_num; i++) {
		ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
		if (ret != 0)
			RTE_LOG(ERR, VHOST_CONFIG,
				"Fail to unregister vhost driver for %s.\n",
				socket_files + i * PATH_MAX);
	}
}
/* When we receive a SIGINT signal, unregister the vhost driver */
static void
sigint_handler(__rte_unused int signum)
{
	/* Unregister vhost driver. */
	unregister_drivers(nb_sockets);

	exit(0);
}
/*
 * While creating an mbuf pool, one key thing is to figure out how
 * many mbuf entries are enough for our use. FYI, here are some
 * guidelines:
 *
 * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
 *
 * - For each switch core (a CPU core that does the packet switching),
 *   we also need to make some reservation for receiving the packets
 *   from the virtio Tx queue. How many is enough depends on the usage.
 *   It's normally a simple calculation like the following:
 *
 *       MAX_PKT_BURST * max packet size / mbuf size
 *
 *   So, we definitely need to allocate more mbufs when TSO is enabled.
 *
 * - Similarly, for each switching core, we should reserve @nr_rx_desc
 *   mbufs for receiving the packets from the physical NIC device.
 *
 * - We also need to make sure, for each switch core, we have allocated
 *   enough mbufs to fill up the mbuf cache.
 */
static void
create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
	uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
{
	uint32_t nr_mbufs;
	uint32_t nr_mbufs_per_core;
	uint32_t mtu = 1500;

	if (mergeable)
		mtu = MAX_MTU;
	if (enable_tso)
		mtu = 64 * 1024;
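	/*
	 * Worst case, an mtu-sized packet spans roughly
	 * ceil(mtu / (mbuf_size - headroom)) mbufs; the expression below
	 * over-approximates that per-burst need for one switch core.
	 */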
	nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST /
			(mbuf_size - RTE_PKTMBUF_HEADROOM);
	nr_mbufs_per_core += nr_rx_desc;
	nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
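	/* Total, per port: the Rx rings' descriptors plus each switch
	 * core's working set. */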
	nr_mbufs  = nr_queues * nr_rx_desc;
	nr_mbufs += nr_mbufs_per_core * nr_switch_core;
	nr_mbufs *= nr_port;

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
					    nr_mbuf_cache, 0, mbuf_size,
					    rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
}
/*
 * Main function, does initialisation and calls the per-lcore functions.
 */
int
main(int argc, char *argv[])
{
	unsigned lcore_id, core_id = 0;
	unsigned nb_ports, valid_num_ports;
	int ret, i;
	uint16_t portid;
	static pthread_t tid;
	uint64_t flags = 0;

	signal(SIGINT, sigint_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = us_vhost_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid argument\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		TAILQ_INIT(&lcore_info[lcore_id].vdev_list);

		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;
	}

	if (rte_lcore_count() > RTE_MAX_LCORE)
		rte_exit(EXIT_FAILURE, "Not enough cores\n");
	/* Get the number of physical ports. */
	nb_ports = rte_eth_dev_count_avail();

	/*
	 * Update the global var NUM_PORTS and global array PORTS
	 * and get the value of var VALID_NUM_PORTS according to system port number.
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	/*
	 * FIXME: here we are trying to allocate mbufs big enough for
	 * @MAX_QUEUES, but the truth is we're never going to use that
	 * many queues here. We probably should only do allocation for
	 * those queues we are going to use.
	 */
	create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
			 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
	if (vm2vm_mode == VM2VM_HARDWARE) {
		/* Enable VT loop back to let the L2 switch do it. */
		vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
		RTE_LOG(DEBUG, VHOST_CONFIG,
			"Enable loop back for L2 switch in vmdq.\n");
	}

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			RTE_LOG(INFO, VHOST_PORT,
				"Skipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid) != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot initialize network ports\n");
	}
	/* Enable stats if the user option is set. */
	if (enable_stats) {
		ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
					print_stats, NULL);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot create print-stats thread\n");
	}

	/* Launch all data cores. */
	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_remote_launch(switch_worker, NULL, lcore_id);

	if (client_mode)
		flags |= RTE_VHOST_USER_CLIENT;

	if (dequeue_zero_copy)
		flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
	/* Register vhost user driver to handle vhost messages. */
	for (i = 0; i < nb_sockets; i++) {
		char *file = socket_files + i * PATH_MAX;
		ret = rte_vhost_driver_register(file, flags);
		if (ret != 0) {
			unregister_drivers(i);
			rte_exit(EXIT_FAILURE,
				"vhost driver register failure.\n");
		}

		if (builtin_net_driver)
			rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);
		if (mergeable == 0) {
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_MRG_RXBUF);
		}

		if (enable_tx_csum == 0) {
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_CSUM);
		}

		if (enable_tso == 0) {
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_HOST_TSO4);
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_HOST_TSO6);
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_GUEST_TSO4);
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_GUEST_TSO6);
		}

		if (promiscuous) {
			rte_vhost_driver_enable_features(file,
				1ULL << VIRTIO_NET_F_CTRL_RX);
		}
		ret = rte_vhost_driver_callback_register(file,
			&virtio_net_device_ops);
		if (ret != 0) {
			rte_exit(EXIT_FAILURE,
				"failed to register vhost driver callbacks.\n");
		}

		if (rte_vhost_driver_start(file) < 0) {
			rte_exit(EXIT_FAILURE,
				"failed to start vhost driver.\n");
		}
	}

	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_wait_lcore(lcore_id);

	return 0;
}