/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>

#include <sys/eventfd.h>
#include <sys/param.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_vhost.h>
#include <rte_pause.h>

#define MAX_QUEUES 128

/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

#define MBUF_CACHE_SIZE 128
#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE

#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

#define BURST_RX_WAIT_US 15 /* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4 /* Number of retries on RX. */

#define JUMBO_FRAME_MAX_SIZE 0x2600
#define MAX_MTU (JUMBO_FRAME_MAX_SIZE - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN))
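/*
 * Note: JUMBO_FRAME_MAX_SIZE is 0x2600 (9728 bytes); subtracting the 14-byte
 * Ethernet header and the 4-byte CRC gives a MAX_MTU of 9710 bytes.
 */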
/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
#define DEVICE_SAFE_REMOVE 2

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Promiscuous mode */
static uint32_t promiscuous;

/* number of devices/queues to support */
static uint32_t num_queues = 0;
static uint32_t num_devices;

static struct rte_mempool *mbuf_pool;

/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;

static uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;

/* Disable TX checksum offload */
static uint32_t enable_tx_csum;

/* Disable TSO offload */
static uint32_t enable_tso;

static int client_mode;

static int builtin_net_driver;

static int async_vhost_driver;

static char *dma_type;
/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;

/* Socket file paths. Can be set by user */
static char *socket_files;
static int nb_sockets;

/* empty vmdq configuration structure. Filled in programmatically */
static struct rte_eth_conf vmdq_conf_default = {
		.mq_mode = ETH_MQ_RX_VMDQ_ONLY,
		 * VLAN strip is necessary for 1G NICs such as the I350;
		 * it fixes a bug where IPv4 forwarding in the guest cannot
		 * forward packets from one virtio device to another.
		.offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
		.mq_mode = ETH_MQ_TX_NONE,
		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
			     DEV_TX_OFFLOAD_TCP_CKSUM |
			     DEV_TX_OFFLOAD_VLAN_INSERT |
			     DEV_TX_OFFLOAD_MULTI_SEGS |
			     DEV_TX_OFFLOAD_TCP_TSO),
		 * should be overridden separately in code with
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.pool_map = {{0, 0},},

static unsigned lcore_ids[RTE_MAX_LCORE];
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified on the command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;
const uint16_t vlan_tags[] = {
	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
	1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,

/* ethernet addresses of ports */
static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

static struct vhost_dev_tailq_list vhost_dev_list =
	TAILQ_HEAD_INITIALIZER(vhost_dev_list);

static struct lcore_info lcore_info[RTE_MAX_LCORE];

/* Used for queueing bursts of TX packets. */
	struct rte_mbuf *m_table[MAX_PKT_BURST];

struct vhost_bufftable {
	struct rte_mbuf *m_table[MAX_PKT_BURST];

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
 * Vhost TX buffer for each data core.
 * Every data core maintains a TX buffer for every vhost device,
 * which is used for batching packet enqueues for higher performance.
struct vhost_bufftable *vhost_txbuff[RTE_MAX_LCORE * MAX_VHOST_DEVICE];

#define MBUF_TABLE_DRAIN_TSC ((rte_get_tsc_hz() + US_PER_S - 1) \
				 / US_PER_S * BURST_TX_DRAIN_US)
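/*
 * Example (hypothetical clock, for illustration only): with a 2 GHz TSC,
 * (2e9 + 1e6 - 1) / 1e6 rounds up to 2000 cycles per microsecond, so the
 * drain interval above works out to roughly 200,000 TSC cycles for the
 * ~100 us BURST_TX_DRAIN_US period.
 */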
open_dma(const char *value)
	if (dma_type != NULL && strncmp(dma_type, "ioat", 4) == 0)
		return open_ioat(value);

 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
	struct rte_eth_vmdq_rx_conf conf;
	struct rte_eth_vmdq_rx_conf *def_conf =
		&vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;

	memset(&conf, 0, sizeof(conf));
	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
	conf.nb_pool_maps = num_devices;
	conf.enable_loop_back = def_conf->enable_loop_back;
	conf.rx_mode = def_conf->rx_mode;

	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << i);

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
 * Initialises a given port using global settings and with the rx buffers
 * coming from the global mbuf_pool.
port_init(uint16_t port)
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_txconf *txconf;
	int16_t rx_rings, tx_rings;
	uint16_t rx_ring_size, tx_ring_size;

	/* The max pool number from dev_info will be used to validate the pool number specified on the command line */
	retval = rte_eth_dev_info_get(port, &dev_info);
		RTE_LOG(ERR, VHOST_PORT,
			"Error during getting device (port %u) info: %s\n",
			port, strerror(-retval));

	rxconf = &dev_info.default_rxconf;
	txconf = &dev_info.default_txconf;
	rxconf->rx_drop_en = 1;

	/* configure the number of supported virtio devices based on VMDQ limits */
	num_devices = dev_info.max_vmdq_pools;

	rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
	tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;

	tx_rings = (uint16_t)rte_lcore_count();

	/* Get port configuration. */
	retval = get_eth_conf(&port_conf, num_devices);

	/* NIC queues are divided into PF queues and VMDQ queues. */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_devices * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base = dev_info.vmdq_pool_base;
	printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
		num_pf_queues, num_devices, queues_per_pool);
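	/*
	 * Illustrative example (values depend on the NIC, not guaranteed):
	 * a device reporting 128 Rx queues, 64 VMDQ queues and 32 VMDQ pools
	 * would give num_pf_queues = 64, queues_per_pool = 2 and
	 * num_vmdq_queues = 2 * num_devices.
	 */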
	if (!rte_eth_dev_is_valid_port(port))

	rx_rings = (uint16_t)dev_info.max_rx_queues;
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
		RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
			port, strerror(-retval));

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
		RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
			"for port %u: %s.\n", port, strerror(-retval));

	if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
		RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
			"for Rx queues on port %u.\n", port);

	/* Setup the queues. */
	rxconf->offloads = port_conf.rxmode.offloads;
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
				rte_eth_dev_socket_id(port),
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup rx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));

	txconf->offloads = port_conf.txmode.offloads;
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
				rte_eth_dev_socket_id(port),
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup tx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));

	/* Start the device. */
	retval = rte_eth_dev_start(port);
		RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
			port, strerror(-retval));

	retval = rte_eth_promiscuous_enable(port);
		RTE_LOG(ERR, VHOST_PORT,
			"Failed to enable promiscuous mode on port %u: %s\n",
			port, rte_strerror(-retval));

	retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
		RTE_LOG(ERR, VHOST_PORT,
			"Failed to get MAC address on port %u: %s\n",
			port, rte_strerror(-retval));

	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
		" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
		port, RTE_ETHER_ADDR_BYTES(&vmdq_ports_eth_addr[port]));
 * Set socket file path.
us_vhost_parse_socket_path(const char *q_arg)
	/* validate that the socket path fits within PATH_MAX */
	if (strnlen(q_arg, PATH_MAX) == PATH_MAX)

	socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
	if (socket_files == NULL) {

	strlcpy(socket_files + nb_sockets * PATH_MAX, q_arg, PATH_MAX);

 * Parse the portmask provided at run time.
parse_portmask(const char *portmask)
	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))

 * Parse numeric options at run time.
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))

	if (num > max_valid_value)
us_vhost_usage(const char *prgname)
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
	" --rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
	" --socket-file <path>\n"
	" -p PORTMASK: Set mask for ports to be used by application\n"
	" --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
	" --rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destination queue is full\n"
	" --rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. This takes effect only if retries on rx are enabled\n"
	" --rx-retry-num [0-N]: the number of retries on rx. This takes effect only if retries on rx are enabled\n"
	" --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
	" --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
	" --socket-file: The path of the socket file.\n"
	" --tx-csum [0|1] disable/enable TX checksum offload.\n"
	" --tso [0|1] disable/enable TCP segmentation offload.\n"
	" --client register a vhost-user socket in client mode.\n"
	" --dma-type register dma type for your vhost async driver. For example \"ioat\" for now.\n"
	" --dmas register a dma channel for a specific vhost device.\n",
#define OPT_VM2VM "vm2vm"
#define OPT_RX_RETRY "rx-retry"
#define OPT_RX_RETRY_DELAY "rx-retry-delay"
	OPT_RX_RETRY_DELAY_NUM,
#define OPT_RX_RETRY_NUMB "rx-retry-num"
	OPT_RX_RETRY_NUMB_NUM,
#define OPT_MERGEABLE "mergeable"
#define OPT_STATS "stats"
#define OPT_SOCKET_FILE "socket-file"
#define OPT_TX_CSUM "tx-csum"
#define OPT_TSO "tso"
#define OPT_CLIENT "client"
#define OPT_BUILTIN_NET_DRIVER "builtin-net-driver"
	OPT_BUILTIN_NET_DRIVER_NUM,
#define OPT_DMA_TYPE "dma-type"
#define OPT_DMAS "dmas"
 * Parse the arguments given in the command line of the application.
us_vhost_parse_args(int argc, char **argv)
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{OPT_VM2VM, required_argument,
				NULL, OPT_VM2VM_NUM},
		{OPT_RX_RETRY, required_argument,
				NULL, OPT_RX_RETRY_NUM},
		{OPT_RX_RETRY_DELAY, required_argument,
				NULL, OPT_RX_RETRY_DELAY_NUM},
		{OPT_RX_RETRY_NUMB, required_argument,
				NULL, OPT_RX_RETRY_NUMB_NUM},
		{OPT_MERGEABLE, required_argument,
				NULL, OPT_MERGEABLE_NUM},
		{OPT_STATS, required_argument,
				NULL, OPT_STATS_NUM},
		{OPT_SOCKET_FILE, required_argument,
				NULL, OPT_SOCKET_FILE_NUM},
		{OPT_TX_CSUM, required_argument,
				NULL, OPT_TX_CSUM_NUM},
		{OPT_TSO, required_argument,
		{OPT_CLIENT, no_argument,
				NULL, OPT_CLIENT_NUM},
		{OPT_BUILTIN_NET_DRIVER, no_argument,
				NULL, OPT_BUILTIN_NET_DRIVER_NUM},
		{OPT_DMA_TYPE, required_argument,
				NULL, OPT_DMA_TYPE_NUM},
		{OPT_DMAS, required_argument,

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:P",
			long_option, &option_index)) != EOF) {
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
				us_vhost_usage(prgname);

			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
				ETH_VMDQ_ACCEPT_BROADCAST |
				ETH_VMDQ_ACCEPT_MULTICAST;

			ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for "
				us_vhost_usage(prgname);
			vm2vm_mode = (vm2vm_type)ret;

		case OPT_RX_RETRY_NUM:
			ret = parse_num_opt(optarg, 1);
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
				us_vhost_usage(prgname);

		case OPT_TX_CSUM_NUM:
			ret = parse_num_opt(optarg, 1);
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
				us_vhost_usage(prgname);
			enable_tx_csum = ret;

			ret = parse_num_opt(optarg, 1);
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
				us_vhost_usage(prgname);

		case OPT_RX_RETRY_DELAY_NUM:
			ret = parse_num_opt(optarg, INT32_MAX);
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
				us_vhost_usage(prgname);
			burst_rx_delay_time = ret;

		case OPT_RX_RETRY_NUMB_NUM:
			ret = parse_num_opt(optarg, INT32_MAX);
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
				us_vhost_usage(prgname);
			burst_rx_retry_num = ret;

		case OPT_MERGEABLE_NUM:
			ret = parse_num_opt(optarg, 1);
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
				us_vhost_usage(prgname);
				vmdq_conf_default.rxmode.mtu = MAX_MTU;

			ret = parse_num_opt(optarg, INT32_MAX);
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for stats [0..N]\n");
				us_vhost_usage(prgname);
		/* Set socket file path. */
		case OPT_SOCKET_FILE_NUM:
			if (us_vhost_parse_socket_path(optarg) == -1) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for socket name (Max %d characters)\n",
				us_vhost_usage(prgname);

		case OPT_DMA_TYPE_NUM:
			if (open_dma(optarg) == -1) {
				RTE_LOG(INFO, VHOST_CONFIG,
				us_vhost_usage(prgname);
			async_vhost_driver = 1;

		case OPT_BUILTIN_NET_DRIVER_NUM:
			builtin_net_driver = 1;

		/* Invalid option - print options. */
			us_vhost_usage(prgname);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = i;
	if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
 * Update the global num_ports variable and the ports array according to the
 * number of ports in the system, and return the number of valid ports.
static unsigned check_ports_num(unsigned nb_ports)
	unsigned valid_num_ports = num_ports;

	if (num_ports > nb_ports) {
		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;

	for (portid = 0; portid < num_ports; portid++) {
		if (!rte_eth_dev_is_valid_port(ports[portid])) {
			RTE_LOG(INFO, VHOST_PORT,
				"\nSpecified port ID(%u) is not valid\n",
			ports[portid] = INVALID_PORT_ID;

	return valid_num_ports;
static __rte_always_inline struct vhost_dev *
find_vhost_dev(struct rte_ether_addr *mac)
	struct vhost_dev *vdev;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->ready == DEVICE_RX &&
		    rte_is_same_ether_addr(mac, &vdev->mac_address))
 * This function learns the MAC address of the device and registers it, along
 * with a VLAN tag, with the VMDQ.
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
	struct rte_ether_hdr *pkt_hdr;

	/* Learn MAC address of guest device from packet */
	pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	if (find_vhost_dev(&pkt_hdr->src_addr)) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) device is using a registered MAC!\n",

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		vdev->mac_address.addr_bytes[i] =
			pkt_hdr->src_addr.addr_bytes[i];

	/* vlan_tag currently uses the device_id. */
	vdev->vlan_tag = vlan_tags[vdev->vid];

	/* Print out VMDQ registration info. */
	RTE_LOG(INFO, VHOST_DATA,
		"(%d) mac " RTE_ETHER_ADDR_PRT_FMT " and vlan %d registered\n",
		vdev->vid, RTE_ETHER_ADDR_BYTES(&vdev->mac_address),

	/* Register the MAC address. */
	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
		(uint32_t)vdev->vid + vmdq_pool_base);
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) failed to add device MAC address to VMDQ\n",

	rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);

	/* Set device as ready for RX. */
	vdev->ready = DEVICE_RX;
 * Removes the MAC address and VLAN tag from the VMDQ. Ensures that nothing is
 * adding buffers to the RX queue before disabling RX on the device.
unlink_vmdq(struct vhost_dev *vdev)
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	if (vdev->ready == DEVICE_RX) {
		/* clear MAC and VLAN settings */
		rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
		for (i = 0; i < 6; i++)
			vdev->mac_address.addr_bytes[i] = 0;

		/* Clear out the receive buffers */
		rx_count = rte_eth_rx_burst(ports[0],
			(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

			for (i = 0; i < rx_count; i++)
				rte_pktmbuf_free(pkts_burst[i]);

			rx_count = rte_eth_rx_burst(ports[0],
				(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

		vdev->ready = DEVICE_MAC_LEARNING;

free_pkts(struct rte_mbuf **pkts, uint16_t n)
		rte_pktmbuf_free(pkts[n]);
static __rte_always_inline void
complete_async_pkts(struct vhost_dev *vdev)
	struct rte_mbuf *p_cpl[MAX_PKT_BURST];
	uint16_t complete_count;

	complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
					VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
	if (complete_count) {
		free_pkts(p_cpl, complete_count);
		__atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);

static __rte_always_inline void
sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
	if (builtin_net_driver) {
		ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
		ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);

		__atomic_add_fetch(&dst_vdev->stats.rx_total_atomic, 1,
		__atomic_add_fetch(&dst_vdev->stats.rx_atomic, ret,
		src_vdev->stats.tx_total++;
		src_vdev->stats.tx += ret;
static __rte_always_inline void
drain_vhost(struct vhost_dev *vdev)
	uint32_t buff_idx = rte_lcore_id() * MAX_VHOST_DEVICE + vdev->vid;
	uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
	struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;

	if (builtin_net_driver) {
		ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
	} else if (async_vhost_driver) {
		uint16_t enqueue_fail = 0;

		complete_async_pkts(vdev);
		ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit);
		__atomic_add_fetch(&vdev->pkts_inflight, ret, __ATOMIC_SEQ_CST);

		enqueue_fail = nr_xmit - ret;
			free_pkts(&m[ret], nr_xmit - ret);
		ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,

		__atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
		__atomic_add_fetch(&vdev->stats.rx_atomic, ret,

	if (!async_vhost_driver)
		free_pkts(m, nr_xmit);
static __rte_always_inline void
drain_vhost_table(void)
	uint16_t lcore_id = rte_lcore_id();
	struct vhost_bufftable *vhost_txq;
	struct vhost_dev *vdev;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		vhost_txq = vhost_txbuff[lcore_id * MAX_VHOST_DEVICE

		cur_tsc = rte_rdtsc();
		if (unlikely(cur_tsc - vhost_txq->pre_tsc
				> MBUF_TABLE_DRAIN_TSC)) {
			RTE_LOG_DP(DEBUG, VHOST_DATA,
				"Vhost TX queue drained after timeout with burst size %u\n",
			vhost_txq->pre_tsc = cur_tsc;
 * Check if the packet destination MAC address is for a local device. If so, put
 * the packet on that device's RX queue. If not, return.
static __rte_always_inline int
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
	struct rte_ether_hdr *pkt_hdr;
	struct vhost_dev *dst_vdev;
	struct vhost_bufftable *vhost_txq;
	uint16_t lcore_id = rte_lcore_id();
	pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->dst_addr);

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC is same. Dropping packet.\n",

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is local\n", dst_vdev->vid);

	if (unlikely(dst_vdev->remove)) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) device is marked for removal\n", dst_vdev->vid);

	vhost_txq = vhost_txbuff[lcore_id * MAX_VHOST_DEVICE + dst_vdev->vid];
	vhost_txq->m_table[vhost_txq->len++] = m;

		vdev->stats.tx_total++;

	if (unlikely(vhost_txq->len == MAX_PKT_BURST)) {
		drain_vhost(dst_vdev);
		vhost_txq->pre_tsc = rte_rdtsc();
 * Check if the destination MAC of a packet belongs to a local VM, and if it
 * does, get its VLAN tag and offset.
static __rte_always_inline int
find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
	uint32_t *offset, uint16_t *vlan_tag)
	struct vhost_dev *dst_vdev;
	struct rte_ether_hdr *pkt_hdr =
		rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->dst_addr);

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC is same. Dropping packet.\n",

	 * HW VLAN strip will have reduced the packet length by the length of
	 * the VLAN tag, so the packet length needs to be restored by adding
	 * it back.
	*offset = VLAN_HLEN;
	*vlan_tag = vlan_tags[vdev->vid];

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
		vdev->vid, dst_vdev->vid, *vlan_tag);
static void virtio_tx_offload(struct rte_mbuf *m)
	struct rte_net_hdr_lens hdr_lens;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_tcp_hdr *tcp_hdr;

	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
	m->l2_len = hdr_lens.l2_len;
	m->l3_len = hdr_lens.l3_len;
	m->l4_len = hdr_lens.l4_len;

	l3_hdr = rte_pktmbuf_mtod_offset(m, void *, m->l2_len);
	tcp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
		m->l2_len + m->l3_len);

	m->ol_flags |= PKT_TX_TCP_SEG;
	if ((ptype & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) {
		m->ol_flags |= PKT_TX_IPV4;
		m->ol_flags |= PKT_TX_IP_CKSUM;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = rte_ipv4_phdr_cksum(l3_hdr, m->ol_flags);
	} else { /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
		m->ol_flags |= PKT_TX_IPV6;
		tcp_hdr->cksum = rte_ipv6_phdr_cksum(l3_hdr, m->ol_flags);

static __rte_always_inline void
do_drain_mbuf_table(struct mbuf_table *tx_q)
	count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
				 tx_q->m_table, tx_q->len);
	if (unlikely(count < tx_q->len))
		free_pkts(&tx_q->m_table[count], tx_q->len - count);
 * This function routes the TX packet to the correct interface. This
 * may be a local device or the physical port.
static __rte_always_inline void
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
	struct mbuf_table *tx_q;
	unsigned offset = 0;
	const uint16_t lcore_id = rte_lcore_id();
	struct rte_ether_hdr *nh;

	nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	if (unlikely(rte_is_broadcast_ether_addr(&nh->dst_addr))) {
		struct vhost_dev *vdev2;

		TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
				sync_virtio_xmit(vdev2, vdev, m);

	/* check if destination is local VM */
	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0))

	if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
		if (unlikely(find_local_dest(vdev, m, &offset,
			rte_pktmbuf_free(m);

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is external\n", vdev->vid);

	/* Add packet to the port tx queue */
	tx_q = &lcore_tx_queue[lcore_id];

	nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	if (unlikely(nh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))) {
		/* Guest has inserted the vlan tag. */
		struct rte_vlan_hdr *vh = (struct rte_vlan_hdr *) (nh + 1);
		uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
		if ((vm2vm_mode == VM2VM_HARDWARE) &&
			(vh->vlan_tci != vlan_tag_be))
			vh->vlan_tci = vlan_tag_be;
		m->ol_flags |= PKT_TX_VLAN_PKT;

		 * Find the right seg to adjust the data len when offset is
		 * bigger than tail room size.
		if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
			if (likely(offset <= rte_pktmbuf_tailroom(m)))
				m->data_len += offset;
				struct rte_mbuf *seg = m;

				while ((seg->next != NULL) &&
					(offset > rte_pktmbuf_tailroom(seg)))
				seg->data_len += offset;
			m->pkt_len += offset;

		m->vlan_tci = vlan_tag;

	if (m->ol_flags & PKT_RX_LRO)
		virtio_tx_offload(m);

	tx_q->m_table[tx_q->len++] = m;
		vdev->stats.tx_total++;

	if (unlikely(tx_q->len == MAX_PKT_BURST))
		do_drain_mbuf_table(tx_q);
static __rte_always_inline void
drain_mbuf_table(struct mbuf_table *tx_q)
	static uint64_t prev_tsc;

	cur_tsc = rte_rdtsc();
	if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {

		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"TX queue drained after timeout with burst size %u\n",
		do_drain_mbuf_table(tx_q);
static __rte_always_inline void
drain_eth_rx(struct vhost_dev *vdev)
	uint16_t rx_count, enqueue_count;
	struct rte_mbuf *pkts[MAX_PKT_BURST];

	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
				    pkts, MAX_PKT_BURST);

	 * When "enable_retry" is set, here we wait and retry when there
	 * are not enough free slots in the queue to hold @rx_count packets,
	 * to reduce packet loss.
		unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,

		for (retry = 0; retry < burst_rx_retry_num; retry++) {
			rte_delay_us(burst_rx_delay_time);
			if (rx_count <= rte_vhost_avail_entries(vdev->vid,

	if (builtin_net_driver) {
		enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
	} else if (async_vhost_driver) {
		uint16_t enqueue_fail = 0;

		complete_async_pkts(vdev);
		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
					VIRTIO_RXQ, pkts, rx_count);
		__atomic_add_fetch(&vdev->pkts_inflight, enqueue_count, __ATOMIC_SEQ_CST);

		enqueue_fail = rx_count - enqueue_count;
			free_pkts(&pkts[enqueue_count], enqueue_fail);
		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,

		__atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
		__atomic_add_fetch(&vdev->stats.rx_atomic, enqueue_count,

	if (!async_vhost_driver)
		free_pkts(pkts, rx_count);
static __rte_always_inline void
drain_virtio_tx(struct vhost_dev *vdev)
	struct rte_mbuf *pkts[MAX_PKT_BURST];

	if (builtin_net_driver) {
		count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
					pkts, MAX_PKT_BURST);
		count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
					mbuf_pool, pkts, MAX_PKT_BURST);

	/* setup VMDq for the first packet */
	if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
		if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
			free_pkts(pkts, count);

	for (i = 0; i < count; ++i)
		virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
 * Main function of vhost-switch. It basically does:
 * for each vhost device {
 *      Which drains the host eth Rx queue linked to the vhost device,
 *      and delivers all of them to the guest virtio Rx ring associated with
 *      this vhost device.
 *    - drain_virtio_tx()
 *      Which drains the guest virtio Tx queue and delivers all of them
 *      to the target, which could be another vhost device, or the
 *      physical eth dev. The routing is done in the function "virtio_tx_route".
switch_worker(void *arg __rte_unused)
	unsigned lcore_id = rte_lcore_id();
	struct vhost_dev *vdev;
	struct mbuf_table *tx_q;

	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);

	tx_q = &lcore_tx_queue[lcore_id];
	for (i = 0; i < rte_lcore_count(); i++) {
		if (lcore_ids[i] == lcore_id) {

		drain_mbuf_table(tx_q);
		drain_vhost_table();
		 * Inform the configuration core that we have exited the
		 * linked list and that no devices are in use if requested.
		if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
			lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;

		 * Process vhost devices
		TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
			if (unlikely(vdev->remove)) {
				vdev->ready = DEVICE_SAFE_REMOVE;

			if (likely(vdev->ready == DEVICE_RX))

			if (likely(!vdev->remove))
				drain_virtio_tx(vdev);
 * Remove a device from the specific data core linked list and from the
 * main linked list. Synchronization occurs through the use of the
 * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
 * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
destroy_device(int vid)
	struct vhost_dev *vdev = NULL;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->vid == vid)

	/* set the remove flag. */
	while (vdev->ready != DEVICE_SAFE_REMOVE) {

	for (i = 0; i < RTE_MAX_LCORE; i++)
		rte_free(vhost_txbuff[i * MAX_VHOST_DEVICE + vid]);

	if (builtin_net_driver)
		vs_vhost_net_remove(vdev);

	TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
	TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);

	/* Set the dev_removal_flag on each lcore. */
	RTE_LCORE_FOREACH_WORKER(lcore)
		lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;

	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
	 * we can be sure that they can no longer access the device removed
	 * from the linked lists and that the devices are no longer in use.
	RTE_LCORE_FOREACH_WORKER(lcore) {
		while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)

	lcore_info[vdev->coreid].device_num--;

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been removed from data core\n",

	if (async_vhost_driver) {
		struct rte_mbuf *m_cpl[vdev->pkts_inflight];

		while (vdev->pkts_inflight) {
			n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, VIRTIO_RXQ,
						m_cpl, vdev->pkts_inflight);
			free_pkts(m_cpl, n_pkt);
			__atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);

		rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
 * A new device is added to a data core. First the device is added to the main linked list
 * and then allocated to a specific data core.
	int lcore, core_add = 0;
	uint32_t device_num_min = num_devices;
	struct vhost_dev *vdev;
	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
		RTE_LOG(INFO, VHOST_DATA,
			"(%d) couldn't allocate memory for vhost dev\n",

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		vhost_txbuff[i * MAX_VHOST_DEVICE + vid]
			= rte_zmalloc("vhost bufftable",
				sizeof(struct vhost_bufftable),
				RTE_CACHE_LINE_SIZE);

		if (vhost_txbuff[i * MAX_VHOST_DEVICE + vid] == NULL) {
			RTE_LOG(INFO, VHOST_DATA,
				"(%d) couldn't allocate memory for vhost TX\n", vid);

	if (builtin_net_driver)
		vs_vhost_net_setup(vdev);

	TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
	vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;

	/* reset ready flag */
	vdev->ready = DEVICE_MAC_LEARNING;

	/* Find a suitable lcore to add the device. */
	RTE_LCORE_FOREACH_WORKER(lcore) {
		if (lcore_info[lcore].device_num < device_num_min) {
			device_num_min = lcore_info[lcore].device_num;
	vdev->coreid = core_add;

	TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
	lcore_info[vdev->coreid].device_num++;

	/* Disable notifications. */
	rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
	rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been added to data core %d\n",

	if (async_vhost_driver) {
		struct rte_vhost_async_config config = {0};
		struct rte_vhost_async_channel_ops channel_ops;

		if (dma_type != NULL && strncmp(dma_type, "ioat", 4) == 0) {
			channel_ops.transfer_data = ioat_transfer_data_cb;
			channel_ops.check_completed_copies =
				ioat_check_completed_copies_cb;

			config.features = RTE_VHOST_ASYNC_INORDER;

			return rte_vhost_async_channel_register(vid, VIRTIO_RXQ,
				config, &channel_ops);
vring_state_changed(int vid, uint16_t queue_id, int enable)
	struct vhost_dev *vdev = NULL;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->vid == vid)

	if (queue_id != VIRTIO_RXQ)

	if (async_vhost_driver) {
			struct rte_mbuf *m_cpl[vdev->pkts_inflight];

			while (vdev->pkts_inflight) {
				n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
							m_cpl, vdev->pkts_inflight);
				free_pkts(m_cpl, n_pkt);
				__atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);

 * These callbacks allow devices to be added to the data core when configuration
 * has been fully completed.
static const struct vhost_device_ops virtio_net_device_ops =
	.new_device = new_device,
	.destroy_device = destroy_device,
	.vring_state_changed = vring_state_changed,
 * This is a thread that will wake up after a period to print stats if the user has
print_stats(__rte_unused void *arg)
	struct vhost_dev *vdev;
	uint64_t tx_dropped, rx_dropped;
	uint64_t tx, tx_total, rx, rx_total;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

		sleep(enable_stats);

		/* Clear screen and move to top left */
		printf("%s%s\n", clr, top_left);
		printf("Device statistics =================================\n");

		TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
			tx_total = vdev->stats.tx_total;
			tx = vdev->stats.tx;
			tx_dropped = tx_total - tx;

			rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic,
			rx = __atomic_load_n(&vdev->stats.rx_atomic,
			rx_dropped = rx_total - rx;

			printf("Statistics for device %d\n"
				"-----------------------\n"
				"TX total: %" PRIu64 "\n"
				"TX dropped: %" PRIu64 "\n"
				"TX successful: %" PRIu64 "\n"
				"RX total: %" PRIu64 "\n"
				"RX dropped: %" PRIu64 "\n"
				"RX successful: %" PRIu64 "\n",
				tx_total, tx_dropped, tx,
				rx_total, rx_dropped, rx);

		printf("===================================================\n");
unregister_drivers(int socket_num)
	for (i = 0; i < socket_num; i++) {
		ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
			RTE_LOG(ERR, VHOST_CONFIG,
				"Failed to unregister vhost driver for %s.\n",
				socket_files + i * PATH_MAX);

/* When we receive a SIGINT signal, unregister the vhost driver */
sigint_handler(__rte_unused int signum)
	/* Unregister vhost driver. */
	unregister_drivers(nb_sockets);
 * While creating an mbuf pool, one key thing is to figure out how
 * many mbuf entries are enough for our use. FYI, here are some
 * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
 * - For each switch core (a CPU core that does the packet switching), we need
 *   to also make some reservation for receiving the packets from the virtio
 *   Tx queue. How many is enough depends on the usage. It's normally
 *   a simple calculation like the following:
 *       MAX_PKT_BURST * max packet size / mbuf size
 *   So, we definitely need to allocate more mbufs when TSO is enabled.
 * - Similarly, for each switching core, we should reserve @nr_rx_desc
 *   mbufs for receiving the packets from the physical NIC device.
 * - We also need to make sure, for each switch core, we have allocated
 *   enough mbufs to fill up the mbuf cache.
create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
	uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
	uint32_t nr_mbufs_per_core;
	uint32_t mtu = 1500;

	nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST /
			(mbuf_size - RTE_PKTMBUF_HEADROOM);
	nr_mbufs_per_core += nr_rx_desc;
	nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);

	nr_mbufs = nr_queues * nr_rx_desc;
	nr_mbufs += nr_mbufs_per_core * nr_switch_core;
	nr_mbufs *= nr_port;
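	/*
	 * Rough worked example (assumed values, for illustration only): with
	 * mbuf_size = 2176 (2048 bytes of data room plus a 128-byte headroom),
	 * mtu = 1500 and a burst size of 32, each switch core needs about
	 * (1500 + 2176) * 32 / 2048 ~= 57 mbufs for virtio Tx, plus nr_rx_desc
	 * (1024 by default) for NIC Rx, before the per-port multiplication.
	 */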
	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
					    nr_mbuf_cache, 0, mbuf_size,
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

 * Main function, does initialisation and calls the per-lcore functions.
main(int argc, char *argv[])
	unsigned lcore_id, core_id = 0;
	unsigned nb_ports, valid_num_ports;
	static pthread_t tid;
	uint64_t flags = RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS;
	signal(SIGINT, sigint_handler);

	ret = rte_eal_init(argc, argv);
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

	/* parse app arguments */
	ret = us_vhost_parse_args(argc, argv);
		rte_exit(EXIT_FAILURE, "Invalid argument\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		TAILQ_INIT(&lcore_info[lcore_id].vdev_list);

		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;

	if (rte_lcore_count() > RTE_MAX_LCORE)
		rte_exit(EXIT_FAILURE, "Not enough cores\n");

	/* Get the number of physical ports. */
	nb_ports = rte_eth_dev_count_avail();

	 * Update the global num_ports and the ports array, and set
	 * valid_num_ports according to the number of ports in the system.
	valid_num_ports = check_ports_num(nb_ports);

	if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);

	 * FIXME: here we are trying to allocate mbufs big enough for
	 * @MAX_QUEUES, but the truth is we're never going to use that
	 * many queues here. We probably should only do allocation for
	 * those queues we are going to use.
	create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
			 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);

	if (vm2vm_mode == VM2VM_HARDWARE) {
		/* Enable VT loop back to let the L2 switch do it. */
		vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
		RTE_LOG(DEBUG, VHOST_CONFIG,
			"Enable loop back for L2 switch in vmdq.\n");

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			RTE_LOG(INFO, VHOST_PORT,
				"Skipping disabled port %d\n", portid);

		if (port_init(portid) != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot initialize network ports\n");
	/* Enable stats if the user option is set. */
		ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
			rte_exit(EXIT_FAILURE,
				"Cannot create print-stats thread\n");

	/* Launch all data cores. */
	RTE_LCORE_FOREACH_WORKER(lcore_id)
		rte_eal_remote_launch(switch_worker, NULL, lcore_id);

		flags |= RTE_VHOST_USER_CLIENT;

	/* Register vhost user driver to handle vhost messages. */
	for (i = 0; i < nb_sockets; i++) {
		char *file = socket_files + i * PATH_MAX;

		if (async_vhost_driver)
			flags = flags | RTE_VHOST_USER_ASYNC_COPY;

		ret = rte_vhost_driver_register(file, flags);
			unregister_drivers(i);
			rte_exit(EXIT_FAILURE,
				"vhost driver register failure.\n");

		if (builtin_net_driver)
			rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);

		if (mergeable == 0) {
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_MRG_RXBUF);

		if (enable_tx_csum == 0) {
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_CSUM);

		if (enable_tso == 0) {
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_HOST_TSO4);
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_HOST_TSO6);
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_GUEST_TSO4);
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_GUEST_TSO6);

			rte_vhost_driver_enable_features(file,
				1ULL << VIRTIO_NET_F_CTRL_RX);

		ret = rte_vhost_driver_callback_register(file,
			&virtio_net_device_ops);
			rte_exit(EXIT_FAILURE,
				"failed to register vhost driver callbacks.\n");

		if (rte_vhost_driver_start(file) < 0) {
			rte_exit(EXIT_FAILURE,
				"failed to start vhost driver.\n");

	RTE_LCORE_FOREACH_WORKER(lcore_id)
		rte_eal_wait_lcore(lcore_id);

	/* clean up the EAL */