1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
7 #include <linux/if_ether.h>
8 #include <linux/if_vlan.h>
9 #include <linux/virtio_net.h>
10 #include <linux/virtio_ring.h>
13 #include <sys/eventfd.h>
14 #include <sys/param.h>
17 #include <rte_cycles.h>
18 #include <rte_ethdev.h>
20 #include <rte_string_fns.h>
21 #include <rte_malloc.h>
23 #include <rte_vhost.h>
26 #include <rte_pause.h>
32 #define MAX_QUEUES 128
35 /* the maximum number of external ports supported */
36 #define MAX_SUP_PORTS 1
38 #define MBUF_CACHE_SIZE 128
39 #define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
41 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
43 #define BURST_RX_WAIT_US 15 /* Defines how long we wait between retries on RX */
44 #define BURST_RX_RETRIES 4 /* Number of retries on RX. */
46 #define JUMBO_FRAME_MAX_SIZE 0x2600
47 #define MAX_MTU (JUMBO_FRAME_MAX_SIZE - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN))
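/* With JUMBO_FRAME_MAX_SIZE = 0x2600 (9728 bytes), MAX_MTU works out to 9728 - 14 (Ethernet header) - 4 (CRC) = 9710 bytes. */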
49 /* State of virtio device. */
50 #define DEVICE_MAC_LEARNING 0
52 #define DEVICE_SAFE_REMOVE 2
54 /* Configurable number of RX/TX ring descriptors */
55 #define RTE_TEST_RX_DESC_DEFAULT 1024
56 #define RTE_TEST_TX_DESC_DEFAULT 512
58 #define INVALID_PORT_ID 0xFF
60 /* mask of enabled ports */
61 static uint32_t enabled_port_mask = 0;
63 /* Promiscuous mode */
64 static uint32_t promiscuous;
/* Number of devices/queues to support */
67 static uint32_t num_queues = 0;
68 static uint32_t num_devices;
70 static struct rte_mempool *mbuf_pool;
73 /* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
80 static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
83 static uint32_t enable_stats = 0;
84 /* Enable retries on RX. */
85 static uint32_t enable_retry = 1;
87 /* Disable TX checksum offload */
88 static uint32_t enable_tx_csum;
90 /* Disable TSO offload */
91 static uint32_t enable_tso;
93 static int client_mode;
95 static int builtin_net_driver;
97 static int async_vhost_driver;
99 static char *dma_type;
/* Specify timeout (in microseconds) between retries on RX. */
102 static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
103 /* Specify the number of retries on RX. */
104 static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
106 /* Socket file paths. Can be set by user */
107 static char *socket_files;
108 static int nb_sockets;
110 /* empty VMDq configuration structure. Filled in programmatically */
111 static struct rte_eth_conf vmdq_conf_default = {
113 .mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY,
* VLAN strip is necessary for 1G NICs such as the I350;
* without it, IPv4 forwarding in the guest cannot forward
* packets from one virtio device to another.
120 .offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
124 .mq_mode = RTE_ETH_MQ_TX_NONE,
125 .offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
126 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
127 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
128 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
129 RTE_ETH_TX_OFFLOAD_TCP_TSO),
133 * should be overridden separately in code with
137 .nb_queue_pools = RTE_ETH_8_POOLS,
138 .enable_default_pool = 0,
141 .pool_map = {{0, 0},},
147 static unsigned lcore_ids[RTE_MAX_LCORE];
148 static uint16_t ports[RTE_MAX_ETHPORTS];
149 static unsigned num_ports = 0; /**< The number of ports specified in command line */
150 static uint16_t num_pf_queues, num_vmdq_queues;
151 static uint16_t vmdq_pool_base, vmdq_queue_base;
152 static uint16_t queues_per_pool;
154 const uint16_t vlan_tags[] = {
155 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
156 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
157 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
158 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
159 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
160 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
161 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
162 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
165 /* ethernet addresses of ports */
166 static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
168 static struct vhost_dev_tailq_list vhost_dev_list =
169 TAILQ_HEAD_INITIALIZER(vhost_dev_list);
171 static struct lcore_info lcore_info[RTE_MAX_LCORE];
173 /* Used for queueing bursts of TX packets. */
177 struct rte_mbuf *m_table[MAX_PKT_BURST];
180 struct vhost_bufftable {
183 struct rte_mbuf *m_table[MAX_PKT_BURST];
186 /* TX queue for each data core. */
187 struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
190 * Vhost TX buffer for each data core.
191 * Every data core maintains a TX buffer for every vhost device,
* which is used to batch packet enqueues for higher performance.
194 struct vhost_bufftable *vhost_txbuff[RTE_MAX_LCORE * MAX_VHOST_DEVICE];
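/* TX drain period expressed in TSC cycles: BURST_TX_DRAIN_US converted with the (rounded-up) TSC ticks per microsecond, e.g. roughly 200000 cycles on a 2 GHz TSC. */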
196 #define MBUF_TABLE_DRAIN_TSC ((rte_get_tsc_hz() + US_PER_S - 1) \
197 / US_PER_S * BURST_TX_DRAIN_US)
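/* Open the DMA channels described by @value; only the "ioat" dma_type is handled here. */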
200 open_dma(const char *value)
202 if (dma_type != NULL && strncmp(dma_type, "ioat", 4) == 0)
203 return open_ioat(value);
209 * Builds up the correct configuration for VMDQ VLAN pool map
210 * according to the pool & queue limits.
213 get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
215 struct rte_eth_vmdq_rx_conf conf;
216 struct rte_eth_vmdq_rx_conf *def_conf =
217 &vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
220 memset(&conf, 0, sizeof(conf));
221 conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
222 conf.nb_pool_maps = num_devices;
223 conf.enable_loop_back = def_conf->enable_loop_back;
224 conf.rx_mode = def_conf->rx_mode;
226 for (i = 0; i < conf.nb_pool_maps; i++) {
conf.pool_map[i].vlan_id = vlan_tags[i];
228 conf.pool_map[i].pools = (1UL << i);
231 (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
232 (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_rx_conf, &conf,
233 sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
* Initialises a given port using global settings, with the RX buffers
* coming from the global mbuf_pool.
242 port_init(uint16_t port)
244 struct rte_eth_dev_info dev_info;
245 struct rte_eth_conf port_conf;
246 struct rte_eth_rxconf *rxconf;
247 struct rte_eth_txconf *txconf;
248 int16_t rx_rings, tx_rings;
249 uint16_t rx_ring_size, tx_ring_size;
253 /* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
254 retval = rte_eth_dev_info_get(port, &dev_info);
256 RTE_LOG(ERR, VHOST_PORT,
257 "Error during getting device (port %u) info: %s\n",
258 port, strerror(-retval));
263 rxconf = &dev_info.default_rxconf;
264 txconf = &dev_info.default_txconf;
265 rxconf->rx_drop_en = 1;
/* Configure the number of supported virtio devices based on VMDQ limits */
268 num_devices = dev_info.max_vmdq_pools;
270 rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
271 tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
273 tx_rings = (uint16_t)rte_lcore_count();
276 if (dev_info.max_mtu != UINT16_MAX && dev_info.max_rx_pktlen > dev_info.max_mtu)
277 vmdq_conf_default.rxmode.mtu = dev_info.max_mtu;
279 vmdq_conf_default.rxmode.mtu = MAX_MTU;
282 /* Get port configuration. */
283 retval = get_eth_conf(&port_conf, num_devices);
286 /* NIC queues are divided into pf queues and vmdq queues. */
287 num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
288 queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
289 num_vmdq_queues = num_devices * queues_per_pool;
290 num_queues = num_pf_queues + num_vmdq_queues;
291 vmdq_queue_base = dev_info.vmdq_queue_base;
292 vmdq_pool_base = dev_info.vmdq_pool_base;
293 printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
294 num_pf_queues, num_devices, queues_per_pool);
296 if (!rte_eth_dev_is_valid_port(port))
299 rx_rings = (uint16_t)dev_info.max_rx_queues;
300 if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
301 port_conf.txmode.offloads |=
302 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
303 /* Configure ethernet device. */
304 retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
306 RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
307 port, strerror(-retval));
311 retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
314 RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
315 "for port %u: %s.\n", port, strerror(-retval));
318 if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
319 RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
320 "for Rx queues on port %u.\n", port);
324 /* Setup the queues. */
325 rxconf->offloads = port_conf.rxmode.offloads;
for (q = 0; q < rx_rings; q++) {
327 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
328 rte_eth_dev_socket_id(port),
332 RTE_LOG(ERR, VHOST_PORT,
333 "Failed to setup rx queue %u of port %u: %s.\n",
334 q, port, strerror(-retval));
338 txconf->offloads = port_conf.txmode.offloads;
for (q = 0; q < tx_rings; q++) {
340 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
341 rte_eth_dev_socket_id(port),
344 RTE_LOG(ERR, VHOST_PORT,
345 "Failed to setup tx queue %u of port %u: %s.\n",
346 q, port, strerror(-retval));
351 /* Start the device. */
352 retval = rte_eth_dev_start(port);
354 RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
355 port, strerror(-retval));
360 retval = rte_eth_promiscuous_enable(port);
362 RTE_LOG(ERR, VHOST_PORT,
363 "Failed to enable promiscuous mode on port %u: %s\n",
364 port, rte_strerror(-retval));
369 retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
371 RTE_LOG(ERR, VHOST_PORT,
372 "Failed to get MAC address on port %u: %s\n",
373 port, rte_strerror(-retval));
377 RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
378 RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
379 " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
380 port, RTE_ETHER_ADDR_BYTES(&vmdq_ports_eth_addr[port]));
386 * Set socket file path.
389 us_vhost_parse_socket_path(const char *q_arg)
/* Reject socket paths that do not fit in PATH_MAX */
394 if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
398 socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
399 if (socket_files == NULL) {
404 strlcpy(socket_files + nb_sockets * PATH_MAX, q_arg, PATH_MAX);
411 * Parse the portmask provided at run time.
414 parse_portmask(const char *portmask)
421 /* parse hexadecimal string */
422 pm = strtoul(portmask, &end, 16);
423 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
431 * Parse num options at run time.
434 parse_num_opt(const char *q_arg, uint32_t max_valid_value)
441 /* parse unsigned int string */
442 num = strtoul(q_arg, &end, 10);
443 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
446 if (num > max_valid_value)
457 us_vhost_usage(const char *prgname)
459 RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
461 " --rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
462 " --socket-file <path>\n"
464 " -p PORTMASK: Set mask for ports to be used by application\n"
465 " --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
466 " --rx-retry [0|1]: disable/enable(default) retries on Rx. Enable retry if destination queue is full\n"
467 " --rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\n"
468 " --rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\n"
469 " --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
470 " --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
471 " --socket-file: The path of the socket file.\n"
472 " --tx-csum [0|1] disable/enable TX checksum offload.\n"
473 " --tso [0|1] disable/enable TCP segment offload.\n"
474 " --client register a vhost-user socket as client mode.\n"
475 " --dma-type register dma type for your vhost async driver. For example \"ioat\" for now.\n"
476 " --dmas register dma channel for specific vhost device.\n",
481 #define OPT_VM2VM "vm2vm"
483 #define OPT_RX_RETRY "rx-retry"
485 #define OPT_RX_RETRY_DELAY "rx-retry-delay"
486 OPT_RX_RETRY_DELAY_NUM,
487 #define OPT_RX_RETRY_NUMB "rx-retry-num"
488 OPT_RX_RETRY_NUMB_NUM,
489 #define OPT_MERGEABLE "mergeable"
491 #define OPT_STATS "stats"
493 #define OPT_SOCKET_FILE "socket-file"
495 #define OPT_TX_CSUM "tx-csum"
497 #define OPT_TSO "tso"
499 #define OPT_CLIENT "client"
501 #define OPT_BUILTIN_NET_DRIVER "builtin-net-driver"
502 OPT_BUILTIN_NET_DRIVER_NUM,
503 #define OPT_DMA_TYPE "dma-type"
505 #define OPT_DMAS "dmas"
510 * Parse the arguments given in the command line of the application.
513 us_vhost_parse_args(int argc, char **argv)
518 const char *prgname = argv[0];
519 static struct option long_option[] = {
520 {OPT_VM2VM, required_argument,
521 NULL, OPT_VM2VM_NUM},
522 {OPT_RX_RETRY, required_argument,
523 NULL, OPT_RX_RETRY_NUM},
524 {OPT_RX_RETRY_DELAY, required_argument,
525 NULL, OPT_RX_RETRY_DELAY_NUM},
526 {OPT_RX_RETRY_NUMB, required_argument,
527 NULL, OPT_RX_RETRY_NUMB_NUM},
528 {OPT_MERGEABLE, required_argument,
529 NULL, OPT_MERGEABLE_NUM},
530 {OPT_STATS, required_argument,
531 NULL, OPT_STATS_NUM},
532 {OPT_SOCKET_FILE, required_argument,
533 NULL, OPT_SOCKET_FILE_NUM},
534 {OPT_TX_CSUM, required_argument,
535 NULL, OPT_TX_CSUM_NUM},
536 {OPT_TSO, required_argument,
538 {OPT_CLIENT, no_argument,
539 NULL, OPT_CLIENT_NUM},
540 {OPT_BUILTIN_NET_DRIVER, no_argument,
541 NULL, OPT_BUILTIN_NET_DRIVER_NUM},
542 {OPT_DMA_TYPE, required_argument,
543 NULL, OPT_DMA_TYPE_NUM},
544 {OPT_DMAS, required_argument,
549 /* Parse command line */
550 while ((opt = getopt_long(argc, argv, "p:P",
551 long_option, &option_index)) != EOF) {
555 enabled_port_mask = parse_portmask(optarg);
556 if (enabled_port_mask == 0) {
557 RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
558 us_vhost_usage(prgname);
565 vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
566 RTE_ETH_VMDQ_ACCEPT_BROADCAST |
567 RTE_ETH_VMDQ_ACCEPT_MULTICAST;
571 ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
573 RTE_LOG(INFO, VHOST_CONFIG,
574 "Invalid argument for "
576 us_vhost_usage(prgname);
579 vm2vm_mode = (vm2vm_type)ret;
582 case OPT_RX_RETRY_NUM:
583 ret = parse_num_opt(optarg, 1);
585 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
586 us_vhost_usage(prgname);
592 case OPT_TX_CSUM_NUM:
593 ret = parse_num_opt(optarg, 1);
595 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
596 us_vhost_usage(prgname);
599 enable_tx_csum = ret;
603 ret = parse_num_opt(optarg, 1);
605 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
606 us_vhost_usage(prgname);
612 case OPT_RX_RETRY_DELAY_NUM:
613 ret = parse_num_opt(optarg, INT32_MAX);
615 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
616 us_vhost_usage(prgname);
619 burst_rx_delay_time = ret;
622 case OPT_RX_RETRY_NUMB_NUM:
623 ret = parse_num_opt(optarg, INT32_MAX);
625 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
626 us_vhost_usage(prgname);
629 burst_rx_retry_num = ret;
632 case OPT_MERGEABLE_NUM:
633 ret = parse_num_opt(optarg, 1);
635 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
636 us_vhost_usage(prgname);
643 ret = parse_num_opt(optarg, INT32_MAX);
645 RTE_LOG(INFO, VHOST_CONFIG,
646 "Invalid argument for stats [0..N]\n");
647 us_vhost_usage(prgname);
653 /* Set socket file path. */
654 case OPT_SOCKET_FILE_NUM:
655 if (us_vhost_parse_socket_path(optarg) == -1) {
656 RTE_LOG(INFO, VHOST_CONFIG,
657 "Invalid argument for socket name (Max %d characters)\n",
659 us_vhost_usage(prgname);
664 case OPT_DMA_TYPE_NUM:
669 if (open_dma(optarg) == -1) {
670 RTE_LOG(INFO, VHOST_CONFIG,
672 us_vhost_usage(prgname);
675 async_vhost_driver = 1;
682 case OPT_BUILTIN_NET_DRIVER_NUM:
683 builtin_net_driver = 1;
686 /* Invalid option - print options. */
688 us_vhost_usage(prgname);
693 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
694 if (enabled_port_mask & (1 << i))
695 ports[num_ports++] = i;
698 if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
699 RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u,"
700 "but only %u port can be enabled\n",num_ports, MAX_SUP_PORTS);
* Update the global variable num_ports and the ports array according to the
* number of ports in the system, and return the number of valid ports.
711 static unsigned check_ports_num(unsigned nb_ports)
713 unsigned valid_num_ports = num_ports;
716 if (num_ports > nb_ports) {
717 RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
718 num_ports, nb_ports);
719 num_ports = nb_ports;
722 for (portid = 0; portid < num_ports; portid ++) {
723 if (!rte_eth_dev_is_valid_port(ports[portid])) {
724 RTE_LOG(INFO, VHOST_PORT,
725 "\nSpecified port ID(%u) is not valid\n",
727 ports[portid] = INVALID_PORT_ID;
731 return valid_num_ports;
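/* Find the vhost device owning @mac among devices that have completed MAC learning (ready == DEVICE_RX). */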
734 static __rte_always_inline struct vhost_dev *
735 find_vhost_dev(struct rte_ether_addr *mac)
737 struct vhost_dev *vdev;
739 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
740 if (vdev->ready == DEVICE_RX &&
741 rte_is_same_ether_addr(mac, &vdev->mac_address))
749 * This function learns the MAC address of the device and registers this along with a
750 * vlan tag to a VMDQ.
753 link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
755 struct rte_ether_hdr *pkt_hdr;
758 /* Learn MAC address of guest device from packet */
759 pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
761 if (find_vhost_dev(&pkt_hdr->src_addr)) {
762 RTE_LOG(ERR, VHOST_DATA,
763 "(%d) device is using a registered MAC!\n",
768 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
769 vdev->mac_address.addr_bytes[i] =
770 pkt_hdr->src_addr.addr_bytes[i];
772 /* vlan_tag currently uses the device_id. */
773 vdev->vlan_tag = vlan_tags[vdev->vid];
775 /* Print out VMDQ registration info. */
776 RTE_LOG(INFO, VHOST_DATA,
777 "(%d) mac " RTE_ETHER_ADDR_PRT_FMT " and vlan %d registered\n",
778 vdev->vid, RTE_ETHER_ADDR_BYTES(&vdev->mac_address),
781 /* Register the MAC address. */
782 ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
783 (uint32_t)vdev->vid + vmdq_pool_base);
785 RTE_LOG(ERR, VHOST_DATA,
786 "(%d) failed to add device MAC address to VMDQ\n",
789 rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);
791 /* Set device as ready for RX. */
792 vdev->ready = DEVICE_RX;
798 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
799 * queue before disabling RX on the device.
802 unlink_vmdq(struct vhost_dev *vdev)
806 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
808 if (vdev->ready == DEVICE_RX) {
/* Clear MAC and VLAN settings */
810 rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
812 vdev->mac_address.addr_bytes[i] = 0;
/* Clear out the receive buffers */
817 rx_count = rte_eth_rx_burst(ports[0],
818 (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
821 for (i = 0; i < rx_count; i++)
822 rte_pktmbuf_free(pkts_burst[i]);
824 rx_count = rte_eth_rx_burst(ports[0],
825 (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
828 vdev->ready = DEVICE_MAC_LEARNING;
833 free_pkts(struct rte_mbuf **pkts, uint16_t n)
836 rte_pktmbuf_free(pkts[n]);
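/* Poll for completed async enqueues, free the completed mbufs and decrement the in-flight packet count. */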
839 static __rte_always_inline void
840 complete_async_pkts(struct vhost_dev *vdev)
842 struct rte_mbuf *p_cpl[MAX_PKT_BURST];
843 uint16_t complete_count;
845 complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
846 VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
847 if (complete_count) {
848 free_pkts(p_cpl, complete_count);
849 __atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
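/* Enqueue a single packet from @src_vdev into @dst_vdev's virtio RX ring (VM2VM path) and update the RX/TX statistics. */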
854 static __rte_always_inline void
855 sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
860 if (builtin_net_driver) {
861 ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
863 ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
867 __atomic_add_fetch(&dst_vdev->stats.rx_total_atomic, 1,
869 __atomic_add_fetch(&dst_vdev->stats.rx_atomic, ret,
871 src_vdev->stats.tx_total++;
872 src_vdev->stats.tx += ret;
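/* Flush this lcore's buffered packets for @vdev into its virtio RX ring, via the builtin net driver, the async data path or the sync enqueue API. */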
876 static __rte_always_inline void
877 drain_vhost(struct vhost_dev *vdev)
880 uint32_t buff_idx = rte_lcore_id() * MAX_VHOST_DEVICE + vdev->vid;
881 uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
882 struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
884 if (builtin_net_driver) {
885 ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
886 } else if (async_vhost_driver) {
887 uint16_t enqueue_fail = 0;
889 complete_async_pkts(vdev);
890 ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit);
891 __atomic_add_fetch(&vdev->pkts_inflight, ret, __ATOMIC_SEQ_CST);
893 enqueue_fail = nr_xmit - ret;
895 free_pkts(&m[ret], nr_xmit - ret);
897 ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
902 __atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
904 __atomic_add_fetch(&vdev->stats.rx_atomic, ret,
908 if (!async_vhost_driver)
909 free_pkts(m, nr_xmit);
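/* Walk all vhost devices and flush any per-device TX buffer owned by this lcore that has aged past MBUF_TABLE_DRAIN_TSC. */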
912 static __rte_always_inline void
913 drain_vhost_table(void)
915 uint16_t lcore_id = rte_lcore_id();
916 struct vhost_bufftable *vhost_txq;
917 struct vhost_dev *vdev;
920 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
921 if (unlikely(vdev->remove == 1))
924 vhost_txq = vhost_txbuff[lcore_id * MAX_VHOST_DEVICE
927 cur_tsc = rte_rdtsc();
928 if (unlikely(cur_tsc - vhost_txq->pre_tsc
929 > MBUF_TABLE_DRAIN_TSC)) {
930 RTE_LOG_DP(DEBUG, VHOST_DATA,
931 "Vhost TX queue drained after timeout with burst size %u\n",
935 vhost_txq->pre_tsc = cur_tsc;
* Check if the packet destination MAC address is for a local device. If so, put
* the packet on that device's RX queue. If not, return.
944 static __rte_always_inline int
945 virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
947 struct rte_ether_hdr *pkt_hdr;
948 struct vhost_dev *dst_vdev;
949 struct vhost_bufftable *vhost_txq;
950 uint16_t lcore_id = rte_lcore_id();
951 pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
953 dst_vdev = find_vhost_dev(&pkt_hdr->dst_addr);
957 if (vdev->vid == dst_vdev->vid) {
958 RTE_LOG_DP(DEBUG, VHOST_DATA,
959 "(%d) TX: src and dst MAC is same. Dropping packet.\n",
964 RTE_LOG_DP(DEBUG, VHOST_DATA,
965 "(%d) TX: MAC address is local\n", dst_vdev->vid);
967 if (unlikely(dst_vdev->remove)) {
968 RTE_LOG_DP(DEBUG, VHOST_DATA,
969 "(%d) device is marked for removal\n", dst_vdev->vid);
973 vhost_txq = vhost_txbuff[lcore_id * MAX_VHOST_DEVICE + dst_vdev->vid];
974 vhost_txq->m_table[vhost_txq->len++] = m;
977 vdev->stats.tx_total++;
981 if (unlikely(vhost_txq->len == MAX_PKT_BURST)) {
982 drain_vhost(dst_vdev);
984 vhost_txq->pre_tsc = rte_rdtsc();
* Check if the destination MAC of a packet belongs to a local VM and,
* if so, get its VLAN tag and the length offset.
993 static __rte_always_inline int
994 find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
995 uint32_t *offset, uint16_t *vlan_tag)
997 struct vhost_dev *dst_vdev;
998 struct rte_ether_hdr *pkt_hdr =
999 rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1001 dst_vdev = find_vhost_dev(&pkt_hdr->dst_addr);
1005 if (vdev->vid == dst_vdev->vid) {
1006 RTE_LOG_DP(DEBUG, VHOST_DATA,
1007 "(%d) TX: src and dst MAC is same. Dropping packet.\n",
* HW VLAN strip reduces the packet length by the
* length of the VLAN tag, so restore the packet
* length by adding it back.
1017 *offset = RTE_VLAN_HLEN;
1018 *vlan_tag = vlan_tags[vdev->vid];
1020 RTE_LOG_DP(DEBUG, VHOST_DATA,
1021 "(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
1022 vdev->vid, dst_vdev->vid, *vlan_tag);
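/* Parse the packet headers and set up the mbuf TSO/checksum offload fields (l2/l3/l4 lengths, ol_flags and pseudo-header checksums). */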
1027 static void virtio_tx_offload(struct rte_mbuf *m)
1029 struct rte_net_hdr_lens hdr_lens;
1030 struct rte_ipv4_hdr *ipv4_hdr;
1031 struct rte_tcp_hdr *tcp_hdr;
1035 ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
1036 m->l2_len = hdr_lens.l2_len;
1037 m->l3_len = hdr_lens.l3_len;
1038 m->l4_len = hdr_lens.l4_len;
1040 l3_hdr = rte_pktmbuf_mtod_offset(m, void *, m->l2_len);
1041 tcp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
1042 m->l2_len + m->l3_len);
1044 m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
1045 if ((ptype & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) {
1046 m->ol_flags |= RTE_MBUF_F_TX_IPV4;
1047 m->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
1049 ipv4_hdr->hdr_checksum = 0;
1050 tcp_hdr->cksum = rte_ipv4_phdr_cksum(l3_hdr, m->ol_flags);
1051 } else { /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
1052 m->ol_flags |= RTE_MBUF_F_TX_IPV6;
1053 tcp_hdr->cksum = rte_ipv6_phdr_cksum(l3_hdr, m->ol_flags);
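/* Transmit the buffered packets on the physical port and free any that could not be sent. */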
1057 static __rte_always_inline void
1058 do_drain_mbuf_table(struct mbuf_table *tx_q)
1062 count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
1063 tx_q->m_table, tx_q->len);
1064 if (unlikely(count < tx_q->len))
1065 free_pkts(&tx_q->m_table[count], tx_q->len - count);
1071 * This function routes the TX packet to the correct interface. This
1072 * may be a local device or the physical port.
1074 static __rte_always_inline void
1075 virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
1077 struct mbuf_table *tx_q;
1078 unsigned offset = 0;
1079 const uint16_t lcore_id = rte_lcore_id();
1080 struct rte_ether_hdr *nh;
1083 nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1084 if (unlikely(rte_is_broadcast_ether_addr(&nh->dst_addr))) {
1085 struct vhost_dev *vdev2;
1087 TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
1089 sync_virtio_xmit(vdev2, vdev, m);
/* Check if destination is a local VM */
1095 if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0))
1098 if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
1099 if (unlikely(find_local_dest(vdev, m, &offset,
1101 rte_pktmbuf_free(m);
1106 RTE_LOG_DP(DEBUG, VHOST_DATA,
1107 "(%d) TX: MAC address is external\n", vdev->vid);
/* Add packet to the port TX queue */
1112 tx_q = &lcore_tx_queue[lcore_id];
1114 nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1115 if (unlikely(nh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))) {
1116 /* Guest has inserted the vlan tag. */
1117 struct rte_vlan_hdr *vh = (struct rte_vlan_hdr *) (nh + 1);
1118 uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
1119 if ((vm2vm_mode == VM2VM_HARDWARE) &&
1120 (vh->vlan_tci != vlan_tag_be))
1121 vh->vlan_tci = vlan_tag_be;
1123 m->ol_flags |= RTE_MBUF_F_TX_VLAN;
1126 * Find the right seg to adjust the data len when offset is
1127 * bigger than tail room size.
1129 if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
1130 if (likely(offset <= rte_pktmbuf_tailroom(m)))
1131 m->data_len += offset;
1133 struct rte_mbuf *seg = m;
1135 while ((seg->next != NULL) &&
1136 (offset > rte_pktmbuf_tailroom(seg)))
1139 seg->data_len += offset;
1141 m->pkt_len += offset;
1144 m->vlan_tci = vlan_tag;
1147 if (m->ol_flags & RTE_MBUF_F_RX_LRO)
1148 virtio_tx_offload(m);
1150 tx_q->m_table[tx_q->len++] = m;
1152 vdev->stats.tx_total++;
1156 if (unlikely(tx_q->len == MAX_PKT_BURST))
1157 do_drain_mbuf_table(tx_q);
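/* Flush the physical port TX queue if it has not been drained within MBUF_TABLE_DRAIN_TSC cycles. */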
1161 static __rte_always_inline void
1162 drain_mbuf_table(struct mbuf_table *tx_q)
1164 static uint64_t prev_tsc;
1170 cur_tsc = rte_rdtsc();
1171 if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
1174 RTE_LOG_DP(DEBUG, VHOST_DATA,
1175 "TX queue drained after timeout with burst size %u\n",
1177 do_drain_mbuf_table(tx_q);
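/* Receive a burst from the VMDQ RX queue bound to @vdev and enqueue it into the guest's virtio RX ring, retrying if enabled. */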
1181 static __rte_always_inline void
1182 drain_eth_rx(struct vhost_dev *vdev)
1184 uint16_t rx_count, enqueue_count;
1185 struct rte_mbuf *pkts[MAX_PKT_BURST];
1187 rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
1188 pkts, MAX_PKT_BURST);
1194 * When "enable_retry" is set, here we wait and retry when there
1195 * is no enough free slots in the queue to hold @rx_count packets,
1196 * to diminish packet loss.
1199 unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
1203 for (retry = 0; retry < burst_rx_retry_num; retry++) {
1204 rte_delay_us(burst_rx_delay_time);
1205 if (rx_count <= rte_vhost_avail_entries(vdev->vid,
1211 if (builtin_net_driver) {
1212 enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
1214 } else if (async_vhost_driver) {
1215 uint16_t enqueue_fail = 0;
1217 complete_async_pkts(vdev);
1218 enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
1219 VIRTIO_RXQ, pkts, rx_count);
1220 __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count, __ATOMIC_SEQ_CST);
1222 enqueue_fail = rx_count - enqueue_count;
1224 free_pkts(&pkts[enqueue_count], enqueue_fail);
1227 enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
1232 __atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
1234 __atomic_add_fetch(&vdev->stats.rx_atomic, enqueue_count,
1238 if (!async_vhost_driver)
1239 free_pkts(pkts, rx_count);
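/* Dequeue packets from the guest's virtio TX ring and route each one to its destination: another vhost device or the physical port. */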
1242 static __rte_always_inline void
1243 drain_virtio_tx(struct vhost_dev *vdev)
1245 struct rte_mbuf *pkts[MAX_PKT_BURST];
1249 if (builtin_net_driver) {
1250 count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
1251 pkts, MAX_PKT_BURST);
1253 count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
1254 mbuf_pool, pkts, MAX_PKT_BURST);
1257 /* setup VMDq for the first packet */
1258 if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
1259 if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
1260 free_pkts(pkts, count);
1263 for (i = 0; i < count; ++i)
1264 virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
1268 * Main function of vhost-switch. It basically does:
1270 * for each vhost device {
* Which drains the host eth Rx queue linked to the vhost device
* and delivers the packets to the guest virtio Rx ring associated
* with this vhost device.
1277 * - drain_virtio_tx()
* Which drains the guest virtio Tx queue and delivers the packets
* to the target, which could be another vhost device or the
* physical eth dev. The routing is done in the function "virtio_tx_route".
1285 switch_worker(void *arg __rte_unused)
1288 unsigned lcore_id = rte_lcore_id();
1289 struct vhost_dev *vdev;
1290 struct mbuf_table *tx_q;
1292 RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
1294 tx_q = &lcore_tx_queue[lcore_id];
1295 for (i = 0; i < rte_lcore_count(); i++) {
1296 if (lcore_ids[i] == lcore_id) {
1303 drain_mbuf_table(tx_q);
1304 drain_vhost_table();
1306 * Inform the configuration core that we have exited the
1307 * linked list and that no devices are in use if requested.
1309 if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
1310 lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;
1313 * Process vhost devices
1315 TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
1317 if (unlikely(vdev->remove)) {
1319 vdev->ready = DEVICE_SAFE_REMOVE;
1323 if (likely(vdev->ready == DEVICE_RX))
1326 if (likely(!vdev->remove))
1327 drain_virtio_tx(vdev);
1335 * Remove a device from the specific data core linked list and from the
1336 * main linked list. Synchronization occurs through the use of the
1337 * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
1338 * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
1341 destroy_device(int vid)
1343 struct vhost_dev *vdev = NULL;
1347 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1348 if (vdev->vid == vid)
/* Set the remove flag. */
1355 while(vdev->ready != DEVICE_SAFE_REMOVE) {
1359 for (i = 0; i < RTE_MAX_LCORE; i++)
1360 rte_free(vhost_txbuff[i * MAX_VHOST_DEVICE + vid]);
1362 if (builtin_net_driver)
1363 vs_vhost_net_remove(vdev);
1365 TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
1367 TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
1370 /* Set the dev_removal_flag on each lcore. */
1371 RTE_LCORE_FOREACH_WORKER(lcore)
1372 lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;
1375 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
1376 * we can be sure that they can no longer access the device removed
1377 * from the linked lists and that the devices are no longer in use.
1379 RTE_LCORE_FOREACH_WORKER(lcore) {
1380 while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
1384 lcore_info[vdev->coreid].device_num--;
1386 RTE_LOG(INFO, VHOST_DATA,
1387 "(%d) device has been removed from data core\n",
1390 if (async_vhost_driver) {
1392 struct rte_mbuf *m_cpl[vdev->pkts_inflight];
1394 while (vdev->pkts_inflight) {
1395 n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, VIRTIO_RXQ,
1396 m_cpl, vdev->pkts_inflight);
1397 free_pkts(m_cpl, n_pkt);
1398 __atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
1401 rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
1408 * A new device is added to a data core. First the device is added to the main linked list
1409 * and then allocated to a specific data core.
1414 int lcore, core_add = 0;
1416 uint32_t device_num_min = num_devices;
1417 struct vhost_dev *vdev;
1418 vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
1420 RTE_LOG(INFO, VHOST_DATA,
1421 "(%d) couldn't allocate memory for vhost dev\n",
1427 for (i = 0; i < RTE_MAX_LCORE; i++) {
1428 vhost_txbuff[i * MAX_VHOST_DEVICE + vid]
1429 = rte_zmalloc("vhost bufftable",
1430 sizeof(struct vhost_bufftable),
1431 RTE_CACHE_LINE_SIZE);
1433 if (vhost_txbuff[i * MAX_VHOST_DEVICE + vid] == NULL) {
1434 RTE_LOG(INFO, VHOST_DATA,
1435 "(%d) couldn't allocate memory for vhost TX\n", vid);
1440 if (builtin_net_driver)
1441 vs_vhost_net_setup(vdev);
1443 TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
1444 vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
/* Reset ready flag */
1447 vdev->ready = DEVICE_MAC_LEARNING;
1450 /* Find a suitable lcore to add the device. */
1451 RTE_LCORE_FOREACH_WORKER(lcore) {
1452 if (lcore_info[lcore].device_num < device_num_min) {
1453 device_num_min = lcore_info[lcore].device_num;
1457 vdev->coreid = core_add;
1459 TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
1461 lcore_info[vdev->coreid].device_num++;
1463 /* Disable notifications. */
1464 rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
1465 rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
1467 RTE_LOG(INFO, VHOST_DATA,
1468 "(%d) device has been added to data core %d\n",
1471 if (async_vhost_driver) {
1472 struct rte_vhost_async_config config = {0};
1473 struct rte_vhost_async_channel_ops channel_ops;
1475 if (dma_type != NULL && strncmp(dma_type, "ioat", 4) == 0) {
1476 channel_ops.transfer_data = ioat_transfer_data_cb;
1477 channel_ops.check_completed_copies =
1478 ioat_check_completed_copies_cb;
1480 config.features = RTE_VHOST_ASYNC_INORDER;
1482 return rte_vhost_async_channel_register(vid, VIRTIO_RXQ,
1483 config, &channel_ops);
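/* Vring state change callback: for the RX queue of an async device, drain the in-flight packets before the ring state changes. */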
1491 vring_state_changed(int vid, uint16_t queue_id, int enable)
1493 struct vhost_dev *vdev = NULL;
1495 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1496 if (vdev->vid == vid)
1502 if (queue_id != VIRTIO_RXQ)
1505 if (async_vhost_driver) {
1508 struct rte_mbuf *m_cpl[vdev->pkts_inflight];
1510 while (vdev->pkts_inflight) {
1511 n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
1512 m_cpl, vdev->pkts_inflight);
1513 free_pkts(m_cpl, n_pkt);
1514 __atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
* These callbacks allow devices to be added to the data core when configuration
* has been fully completed.
1526 static const struct rte_vhost_device_ops virtio_net_device_ops =
1528 .new_device = new_device,
1529 .destroy_device = destroy_device,
1530 .vring_state_changed = vring_state_changed,
* This is a thread that will wake up after a period to print stats if the user has
1538 print_stats(__rte_unused void *arg)
1540 struct vhost_dev *vdev;
1541 uint64_t tx_dropped, rx_dropped;
1542 uint64_t tx, tx_total, rx, rx_total;
1543 const char clr[] = { 27, '[', '2', 'J', '\0' };
1544 const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
1547 sleep(enable_stats);
1549 /* Clear screen and move to top left */
1550 printf("%s%s\n", clr, top_left);
1551 printf("Device statistics =================================\n");
1553 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1554 tx_total = vdev->stats.tx_total;
1555 tx = vdev->stats.tx;
1556 tx_dropped = tx_total - tx;
1558 rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic,
1560 rx = __atomic_load_n(&vdev->stats.rx_atomic,
1562 rx_dropped = rx_total - rx;
1564 printf("Statistics for device %d\n"
1565 "-----------------------\n"
1566 "TX total: %" PRIu64 "\n"
1567 "TX dropped: %" PRIu64 "\n"
1568 "TX successful: %" PRIu64 "\n"
1569 "RX total: %" PRIu64 "\n"
1570 "RX dropped: %" PRIu64 "\n"
1571 "RX successful: %" PRIu64 "\n",
1573 tx_total, tx_dropped, tx,
1574 rx_total, rx_dropped, rx);
1577 printf("===================================================\n");
1586 unregister_drivers(int socket_num)
1590 for (i = 0; i < socket_num; i++) {
1591 ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
1593 RTE_LOG(ERR, VHOST_CONFIG,
1594 "Fail to unregister vhost driver for %s.\n",
1595 socket_files + i * PATH_MAX);
/* When we receive a SIGINT, unregister the vhost driver */
1601 sigint_handler(__rte_unused int signum)
1603 /* Unregister vhost driver. */
1604 unregister_drivers(nb_sockets);
1610 * While creating an mbuf pool, one key thing is to figure out how
* many mbuf entries are enough for our use. FYI, here are some
1614 * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
* - For each switch core (a CPU core that does the packet switching), we
* also need to make some reservation for receiving the packets from the
* virtio Tx queue. How many are enough depends on the usage. It's normally
* a simple calculation like the following:
1621 * MAX_PKT_BURST * max packet size / mbuf size
* So, we definitely need to allocate more mbufs when TSO is enabled.
* - Similarly, for each switching core, we should reserve @nr_rx_desc
* mbufs for receiving the packets from the physical NIC device.
* - We also need to make sure that, for each switch core, we have allocated
* enough mbufs to fill up the mbuf cache.
1632 create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
1633 uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
1636 uint32_t nr_mbufs_per_core;
1637 uint32_t mtu = 1500;
1644 nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST /
1645 (mbuf_size - RTE_PKTMBUF_HEADROOM);
1646 nr_mbufs_per_core += nr_rx_desc;
1647 nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
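/* Illustrative sizing (assuming MAX_PKT_BURST is 32): with mtu = 1500 and mbuf_size = 2176, (1500 + 2176) * 32 / (2176 - 128) is roughly 57 mbufs, plus nr_rx_desc. */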
1649 nr_mbufs = nr_queues * nr_rx_desc;
1650 nr_mbufs += nr_mbufs_per_core * nr_switch_core;
1651 nr_mbufs *= nr_port;
1653 mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
1654 nr_mbuf_cache, 0, mbuf_size,
1656 if (mbuf_pool == NULL)
1657 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
1661 * Main function, does initialisation and calls the per-lcore functions.
1664 main(int argc, char *argv[])
1666 unsigned lcore_id, core_id = 0;
1667 unsigned nb_ports, valid_num_ports;
1670 static pthread_t tid;
1671 uint64_t flags = RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS;
1673 signal(SIGINT, sigint_handler);
1676 ret = rte_eal_init(argc, argv);
1678 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
1682 /* parse app arguments */
1683 ret = us_vhost_parse_args(argc, argv);
1685 rte_exit(EXIT_FAILURE, "Invalid argument\n");
1687 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1688 TAILQ_INIT(&lcore_info[lcore_id].vdev_list);
1690 if (rte_lcore_is_enabled(lcore_id))
1691 lcore_ids[core_id++] = lcore_id;
1694 if (rte_lcore_count() > RTE_MAX_LCORE)
1695 rte_exit(EXIT_FAILURE,"Not enough cores\n");
1697 /* Get the number of physical ports. */
1698 nb_ports = rte_eth_dev_count_avail();
* Update the global variable num_ports and the global ports array,
* and get the value of valid_num_ports according to the number of system ports
1704 valid_num_ports = check_ports_num(nb_ports);
1706 if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
1707 RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u,"
1708 "but only %u port can be enabled\n",num_ports, MAX_SUP_PORTS);
1713 * FIXME: here we are trying to allocate mbufs big enough for
1714 * @MAX_QUEUES, but the truth is we're never going to use that
1715 * many queues here. We probably should only do allocation for
1716 * those queues we are going to use.
1718 create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
1719 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
1721 if (vm2vm_mode == VM2VM_HARDWARE) {
/* Enable VT loopback so the NIC's L2 switch handles VM2VM traffic. */
1723 vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
1724 RTE_LOG(DEBUG, VHOST_CONFIG,
1725 "Enable loop back for L2 switch in vmdq.\n");
1728 /* initialize all ports */
1729 RTE_ETH_FOREACH_DEV(portid) {
1730 /* skip ports that are not enabled */
1731 if ((enabled_port_mask & (1 << portid)) == 0) {
1732 RTE_LOG(INFO, VHOST_PORT,
1733 "Skipping disabled port %d\n", portid);
1736 if (port_init(portid) != 0)
1737 rte_exit(EXIT_FAILURE,
1738 "Cannot initialize network ports\n");
1741 /* Enable stats if the user option is set. */
1743 ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
1746 rte_exit(EXIT_FAILURE,
1747 "Cannot create print-stats thread\n");
1750 /* Launch all data cores. */
1751 RTE_LCORE_FOREACH_WORKER(lcore_id)
1752 rte_eal_remote_launch(switch_worker, NULL, lcore_id);
1755 flags |= RTE_VHOST_USER_CLIENT;
1757 /* Register vhost user driver to handle vhost messages. */
1758 for (i = 0; i < nb_sockets; i++) {
1759 char *file = socket_files + i * PATH_MAX;
1761 if (async_vhost_driver)
1762 flags = flags | RTE_VHOST_USER_ASYNC_COPY;
1764 ret = rte_vhost_driver_register(file, flags);
1766 unregister_drivers(i);
1767 rte_exit(EXIT_FAILURE,
1768 "vhost driver register failure.\n");
1771 if (builtin_net_driver)
1772 rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);
1774 if (mergeable == 0) {
1775 rte_vhost_driver_disable_features(file,
1776 1ULL << VIRTIO_NET_F_MRG_RXBUF);
1779 if (enable_tx_csum == 0) {
1780 rte_vhost_driver_disable_features(file,
1781 1ULL << VIRTIO_NET_F_CSUM);
1784 if (enable_tso == 0) {
1785 rte_vhost_driver_disable_features(file,
1786 1ULL << VIRTIO_NET_F_HOST_TSO4);
1787 rte_vhost_driver_disable_features(file,
1788 1ULL << VIRTIO_NET_F_HOST_TSO6);
1789 rte_vhost_driver_disable_features(file,
1790 1ULL << VIRTIO_NET_F_GUEST_TSO4);
1791 rte_vhost_driver_disable_features(file,
1792 1ULL << VIRTIO_NET_F_GUEST_TSO6);
1796 rte_vhost_driver_enable_features(file,
1797 1ULL << VIRTIO_NET_F_CTRL_RX);
1800 ret = rte_vhost_driver_callback_register(file,
1801 &virtio_net_device_ops);
1803 rte_exit(EXIT_FAILURE,
1804 "failed to register vhost driver callbacks.\n");
1807 if (rte_vhost_driver_start(file) < 0) {
1808 rte_exit(EXIT_FAILURE,
1809 "failed to start vhost driver.\n");
1813 RTE_LCORE_FOREACH_WORKER(lcore_id)
1814 rte_eal_wait_lcore(lcore_id);
1816 /* clean up the EAL */