/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <arpa/inet.h>
#include <getopt.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/param.h>
#include <unistd.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_virtio_net.h>

#include "main.h"
#define MAX_QUEUES 128

/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

/*
 * Calculate the number of buffers needed per port
 */
#define NUM_MBUFS_PER_PORT ((MAX_QUEUES*RTE_TEST_RX_DESC_DEFAULT) +	\
				(num_switching_cores*MAX_PKT_BURST) +	\
				(num_switching_cores*RTE_TEST_TX_DESC_DEFAULT) +\
				(num_switching_cores*MBUF_CACHE_SIZE))
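/*
 * Worked example (illustrative only): with MAX_QUEUES = 128 and
 * RTE_TEST_RX_DESC_DEFAULT = 1024, the first term alone reserves
 * 128 * 1024 = 131072 mbufs, and each switching core then adds
 * MAX_PKT_BURST + RTE_TEST_TX_DESC_DEFAULT + MBUF_CACHE_SIZE
 * = 32 + 512 + 128 = 672 more.
 */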
#define MBUF_CACHE_SIZE 128
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
/*
 * No frame data buffer allocated from the host is required for the zero copy
 * implementation: the guest allocates the frame data buffers, and vhost
 * uses them directly.
 */
#define VIRTIO_DESCRIPTOR_LEN_ZCP 1518
#define MBUF_SIZE_ZCP (VIRTIO_DESCRIPTOR_LEN_ZCP + sizeof(struct rte_mbuf) \
	+ RTE_PKTMBUF_HEADROOM)
#define MBUF_CACHE_SIZE_ZCP 0
/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /* Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /* Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /* Default values of RX write-back threshold reg. */
/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /* Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /* Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /* Default values of TX write-back threshold reg. */

#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

#define BURST_RX_WAIT_US 15 /* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4 /* Number of retries on RX. */

#define JUMBO_FRAME_MAX_SIZE 0x2600
/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
#define DEVICE_RX 1
#define DEVICE_SAFE_REMOVE 2

/* Config_core_flag status definitions. */
#define REQUEST_DEV_REMOVAL 1
#define ACK_DEV_REMOVAL 0

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512
/*
 * These two ZCP defaults may need refining for the legacy and DPDK-based
 * front ends: take the maximum number of available vring descriptors/entries
 * from the guest, subtract MAX_PKT_BURST, then round down to a power of 2.
 *
 * For the legacy front end there are 128 descriptors:
 * half for the virtio headers, the other half for the mbufs.
 */
#define RTE_TEST_RX_DESC_DEFAULT_ZCP 32 /* legacy: 32, DPDK virt FE: 128. */
#define RTE_TEST_TX_DESC_DEFAULT_ZCP 64 /* legacy: 64, DPDK virt FE: 64. */
/* Get first 4 bytes in mbuf headroom. */
#define MBUF_HEADROOM_UINT32(mbuf) (*(uint32_t *)((uint8_t *)(mbuf) \
		+ sizeof(struct rte_mbuf)))
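/*
 * Usage sketch: the zero copy path below stashes the vring descriptor index
 * in the mbuf headroom when attaching a guest buffer, and reads it back when
 * completing the descriptor, e.g.:
 *
 *	MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
 *	...
 *	vq->used->ring[used_idx].id = MBUF_HEADROOM_UINT32(mbuf);
 */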
/* true if x is a power of 2 */
#define POWEROF2(x) ((((x)-1) & (x)) == 0)
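/*
 * For example, POWEROF2(64) is true and POWEROF2(48) is false. Note that
 * POWEROF2(0) also evaluates to true, so a caller that must reject a zero
 * descriptor count has to check for that separately.
 */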
#define INVALID_PORT_ID 0xFF

/* Max number of devices. Limited by VMDQ. */
#define MAX_DEVICES 64

/* Size of buffers used for snprintfs. */
#define MAX_PRINT_BUFF 6072

/* Maximum character device basename size. */
#define MAX_BASENAME_SZ 10

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64

/* Used to compare MAC addresses. */
#define MAC_ADDR_CMP 0xFFFFFFFFFFFFULL

/* Number of descriptors per cacheline. */
#define DESC_PER_CACHELINE (CACHE_LINE_SIZE / sizeof(struct vring_desc))
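/*
 * With the common 64-byte cache line and the 16-byte struct vring_desc
 * (u64 addr, u32 len, u16 flags, u16 next), this works out to 4 descriptors
 * per cache line.
 */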
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Number of switching cores enabled */
static uint32_t num_switching_cores = 0;

/* number of devices/queues to support */
static uint32_t num_queues = 0;
static uint32_t num_devices;
/*
 * Enable zero copy: packet buffers are DMA'd directly to/from the HW
 * descriptors; disabled by default.
 */
static uint32_t zero_copy;
static int mergeable;

/* number of descriptors to use */
static uint32_t num_rx_descriptor = RTE_TEST_RX_DESC_DEFAULT_ZCP;
static uint32_t num_tx_descriptor = RTE_TEST_TX_DESC_DEFAULT_ZCP;
/* max ring descriptor; ixgbe, i40e and e1000 are all 4096. */
#define MAX_RING_DESC 4096

static struct vpool {
	struct rte_mempool *pool;
	struct rte_ring *ring;
	uint32_t buf_size;
} vpool_array[MAX_QUEUES+MAX_QUEUES];
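/*
 * Note on layout: the first MAX_QUEUES entries serve as the RX pools (one
 * per VMDQ queue, handed to rte_eth_rx_queue_setup() in port_init()), while
 * entries [MAX_QUEUES, 2*MAX_QUEUES) serve the TX side of the zero copy
 * path, as in vpool_array[MAX_QUEUES + vmdq_rx_q] below.
 */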
/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
typedef enum {
	VM2VM_DISABLED = 0,
	VM2VM_SOFTWARE = 1,
	VM2VM_HARDWARE = 2,
	VM2VM_LAST
} vm2vm_type;
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;

/* The type of host physical address translated from guest physical address. */
typedef enum {
	PHYS_ADDR_CONTINUOUS = 0,
	PHYS_ADDR_CROSS_SUBREG = 1,
	PHYS_ADDR_INVALID = 2,
	PHYS_ADDR_LAST
} hpa_type;
/* Enable stats. */
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;
/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;

/* Character device basename. Can be set by user. */
static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";
/* Default configuration for rx and tx thresholds etc. */
static struct rte_eth_rxconf rx_conf_default = {
	.rx_thresh = {
		.pthresh = RX_PTHRESH,
		.hthresh = RX_HTHRESH,
		.wthresh = RX_WTHRESH,
	},
	.rx_drop_en = 1,
};

/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe/igb PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
static struct rte_eth_txconf tx_conf_default = {
	.tx_thresh = {
		.pthresh = TX_PTHRESH,
		.hthresh = TX_HTHRESH,
		.wthresh = TX_WTHRESH,
	},
	.tx_free_thresh = 0, /* Use PMD default values */
	.tx_rs_thresh = 0, /* Use PMD default values */
};
/* empty vmdq configuration structure. Filled in programmatically */
static struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		/*
		 * This is needed on 1G NICs such as the I350; it fixes a bug
		 * where IPv4 forwarding in the guest could not forward packets
		 * from one virtio device to another.
		 */
		.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripped by hardware */
	},

	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
	.rx_adv_conf = {
		/*
		 * should be overridden separately in code with
		 * appropriate values
		 */
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
};
static unsigned lcore_ids[RTE_MAX_LCORE];
static uint8_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified on the command line */

static const uint16_t external_pkt_default_vlan_tag = 2000;
const uint16_t vlan_tags[] = {
	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
	1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};

/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
/* Heads for the main used and free linked lists for the data path. */
static struct virtio_net_data_ll *ll_root_used = NULL;
static struct virtio_net_data_ll *ll_root_free = NULL;

/* Array of data core structures containing information on individual core linked lists. */
static struct lcore_info lcore_info[RTE_MAX_LCORE];

/* Used for queueing bursts of TX packets. */
struct mbuf_table {
	unsigned len;
	unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];

/* TX queue for each virtio device for zero copy. */
struct mbuf_table tx_queue_zcp[MAX_QUEUES];
/* Vlan header struct used to insert vlan tags on TX. */
struct vlan_ethhdr {
	unsigned char h_dest[ETH_ALEN];
	unsigned char h_source[ETH_ALEN];
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
	__be16 h_vlan_encapsulated_proto;
};

/* IPv4 Header */
struct ipv4_hdr {
	uint8_t  version_ihl;		/**< version and header length */
	uint8_t  type_of_service;	/**< type of service */
	uint16_t total_length;		/**< length of packet */
	uint16_t packet_id;		/**< packet ID */
	uint16_t fragment_offset;	/**< fragmentation offset */
	uint8_t  time_to_live;		/**< time to live */
	uint8_t  next_proto_id;		/**< protocol ID */
	uint16_t hdr_checksum;		/**< header checksum */
	uint32_t src_addr;		/**< source address */
	uint32_t dst_addr;		/**< destination address */
} __attribute__((__packed__));
/* Header lengths. */
#define VLAN_HLEN 4
#define VLAN_ETH_HLEN 18

/* Per-device statistics struct */
struct device_statistics {
	uint64_t tx_total;
	rte_atomic64_t rx_total_atomic;
	uint64_t rx_total;
	uint64_t tx;
	rte_atomic64_t rx_atomic;
	uint64_t rx;
} __rte_cache_aligned;
struct device_statistics dev_statistics[MAX_DEVICES];
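/*
 * The RX counters are atomic because the VM2VM path lets the TX core of one
 * device update the RX statistics of another device (see virtio_tx_local()),
 * so two cores may touch the same counters concurrently.
 */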
/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
	struct rte_eth_vmdq_rx_conf conf;
	unsigned i;

	memset(&conf, 0, sizeof(conf));
	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
	conf.nb_pool_maps = num_devices;
	conf.enable_loop_back =
		vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back;

	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << i);
	}

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
	return 0;
}
/*
 * Validate the device number according to the max pool number obtained from
 * dev_info. If the device number is invalid, give the error message and
 * return -1. Each device must have its own pool.
 */
static inline int
validate_num_devices(uint32_t max_nb_devices)
{
	if (num_devices > max_nb_devices) {
		RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
		return -1;
	}
	return 0;
}
/*
 * Initialises a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as parameter.
 */
static inline int
port_init(uint8_t port)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	uint16_t rx_rings, tx_rings;
	uint16_t rx_ring_size, tx_ring_size;
	int retval;
	uint16_t q;

	/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
	rte_eth_dev_info_get(port, &dev_info);

	/* Configure the number of supported virtio devices based on VMDQ limits */
	num_devices = dev_info.max_vmdq_pools;
	num_queues = dev_info.max_rx_queues;

	if (zero_copy) {
		rx_ring_size = num_rx_descriptor;
		tx_ring_size = num_tx_descriptor;
		tx_rings = dev_info.max_tx_queues;
	} else {
		rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
		tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
		tx_rings = (uint16_t)rte_lcore_count();
	}

	retval = validate_num_devices(MAX_DEVICES);
	if (retval < 0)
		return retval;

	/* Get port configuration. */
	retval = get_eth_conf(&port_conf, num_devices);
	if (retval < 0)
		return retval;

	if (port >= rte_eth_dev_count())
		return -1;

	rx_rings = (uint16_t)num_queues;

	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Setup the queues. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
			rte_eth_dev_socket_id(port), &rx_conf_default,
			vpool_array[q].pool);
		if (retval < 0)
			return retval;
	}
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
			rte_eth_dev_socket_id(port), &tx_conf_default);
		if (retval < 0)
			return retval;
	}

	/* Start the device. */
	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		RTE_LOG(ERR, VHOST_DATA, "Failed to start the device.\n");
		return retval;
	}

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	return 0;
}
/*
 * Set character device basename.
 */
static int
us_vhost_parse_basename(const char *q_arg)
{
	/* The basename must fit, with its NUL terminator, in MAX_BASENAME_SZ. */
	if (strnlen(q_arg, MAX_BASENAME_SZ) >= MAX_BASENAME_SZ)
		return -1;
	else
		snprintf((char *)&dev_basename, MAX_BASENAME_SZ, "%s", q_arg);

	return 0;
}
/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	errno = 0;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}
/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
	char *end = NULL;
	unsigned long num;

	errno = 0;

	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	if (num > max_valid_value)
		return -1;

	return num;
}
/*
 * Display usage
 */
static void
us_vhost_usage(const char *prgname)
{
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
	"		--vm2vm [0|1|2]\n"
	"		--rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
	"		--dev-basename <name>\n"
	"		--zero-copy [0|1] --rx-desc-num [0-N] --tx-desc-num [0-N]\n"
	"		-p PORTMASK: Set mask for ports to be used by application\n"
	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
	"		--rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destination queue is full\n"
	"		--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. This only takes effect if retries on rx are enabled\n"
	"		--rx-retry-num [0-N]: the number of retries on rx. This only takes effect if retries on rx are enabled\n"
	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
	"		--dev-basename: The basename to be used for the character device.\n"
	"		--zero-copy [0|1]: disable(default)/enable rx/tx "
	"zero copy\n"
	"		--rx-desc-num [0-N]: the number of descriptors on rx, "
	"used only when zero copy is enabled.\n"
	"		--tx-desc-num [0-N]: the number of descriptors on tx, "
	"used only when zero copy is enabled.\n",
	       prgname);
}
/*
 * Parse the arguments given in the command line of the application.
 */
static int
us_vhost_parse_args(int argc, char **argv)
{
	int opt, ret;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"vm2vm", required_argument, NULL, 0},
		{"rx-retry", required_argument, NULL, 0},
		{"rx-retry-delay", required_argument, NULL, 0},
		{"rx-retry-num", required_argument, NULL, 0},
		{"mergeable", required_argument, NULL, 0},
		{"stats", required_argument, NULL, 0},
		{"dev-basename", required_argument, NULL, 0},
		{"zero-copy", required_argument, NULL, 0},
		{"rx-desc-num", required_argument, NULL, 0},
		{"tx-desc-num", required_argument, NULL, 0},
		{NULL, 0, 0, 0},
	};
	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:", long_option,
			&option_index)) != EOF) {
		switch (opt) {
		/* Portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
				us_vhost_usage(prgname);
				return -1;
			}
			break;

		case 0:
			/* Enable/disable vm2vm comms. */
			if (!strncmp(long_option[option_index].name, "vm2vm",
				MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for "
						"vm2vm [0|1|2]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					vm2vm_mode = (vm2vm_type)ret;
				}
			}
			/* Enable/disable retries on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					enable_retry = ret;
				}
			}
			/* Specify the retry delay time (in microseconds) on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					burst_rx_delay_time = ret;
				}
			}
			/* Specify the number of retries on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					burst_rx_retry_num = ret;
				}
			}
			/* Enable/disable RX mergeable buffers. */
			if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					mergeable = !!ret;
					if (ret) {
						vmdq_conf_default.rxmode.jumbo_frame = 1;
						vmdq_conf_default.rxmode.max_rx_pkt_len
							= JUMBO_FRAME_MAX_SIZE;
					}
				}
			}
			/* Enable/disable stats. */
			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					enable_stats = ret;
				}
			}
			/* Set character device basename. */
			if (!strncmp(long_option[option_index].name, "dev-basename", MAX_LONG_OPT_SZ)) {
				if (us_vhost_parse_basename(optarg) == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for character device basename (Max %d characters)\n", MAX_BASENAME_SZ);
					us_vhost_usage(prgname);
					return -1;
				}
			}
			/* Enable/disable rx/tx zero copy. */
			if (!strncmp(long_option[option_index].name,
				"zero-copy", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument"
						" for zero-copy [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else
					zero_copy = ret;

				if (zero_copy) {
#ifdef RTE_MBUF_REFCNT
					RTE_LOG(ERR, VHOST_CONFIG, "Before running "
					"the zero copy vhost app, please "
					"disable RTE_MBUF_REFCNT\n"
					"in the config file and then rebuild the DPDK "
					"core lib!\n"
					"Otherwise please disable the zero copy "
					"flag on the command line!\n");
					return -1;
#endif
				}
			}
			/* Specify the descriptor number on RX. */
			if (!strncmp(long_option[option_index].name,
				"rx-desc-num", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, MAX_RING_DESC);
				if ((ret == -1) || (!POWEROF2(ret))) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for rx-desc-num [0-N], "
						"power of 2 required.\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					num_rx_descriptor = ret;
				}
			}
			/* Specify the descriptor number on TX. */
			if (!strncmp(long_option[option_index].name,
				"tx-desc-num", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, MAX_RING_DESC);
				if ((ret == -1) || (!POWEROF2(ret))) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for tx-desc-num [0-N], "
						"power of 2 required.\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					num_tx_descriptor = ret;
				}
			}

			break;
		/* Invalid option - print options. */
		default:
			us_vhost_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint8_t)i;
	}

	if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	if ((zero_copy == 1) && (vm2vm_mode == VM2VM_SOFTWARE)) {
		RTE_LOG(INFO, VHOST_PORT,
			"Vhost zero copy doesn't support software vm2vm, "
			"please specify 'vm2vm 2' to use hardware vm2vm.\n");
		return -1;
	}

	if ((zero_copy == 1) && (vmdq_conf_default.rxmode.jumbo_frame == 1)) {
		RTE_LOG(INFO, VHOST_PORT,
			"Vhost zero copy doesn't support jumbo frame, "
			"please specify '--mergeable 0' to disable the "
			"mergeable feature.\n");
		return -1;
	}

	return 0;
}
/*
 * Update the global var NUM_PORTS and array PORTS according to system ports number
 * and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (ports[portid] >= nb_ports) {
			RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
				ports[portid], (nb_ports - 1));
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}
/*
 * Macro to print out packet contents. Wrapped in debug define so that the
 * data path is not affected when debug is disabled.
 */
#ifdef DEBUG
#define PRINT_PACKET(device, addr, size, header) do {			\
	char *pkt_addr = (char *)(addr);				\
	unsigned int index;						\
	char packet[MAX_PRINT_BUFF];					\
									\
	if ((header))							\
		snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Header size %d: ", (device->device_fh), (size)); \
	else								\
		snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Packet size %d: ", (device->device_fh), (size)); \
	for (index = 0; index < (size); index++) {			\
		snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), \
			"%02hhx ", pkt_addr[index]);			\
	}								\
	snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), "\n"); \
									\
	LOG_DEBUG(VHOST_DATA, "%s", packet);				\
} while (0)
#else
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif
/*
 * Function to convert guest physical addresses to vhost physical addresses.
 * This is used to convert virtio buffer addresses.
 */
static inline uint64_t __attribute__((always_inline))
gpa_to_hpa(struct vhost_dev *vdev, uint64_t guest_pa,
	uint32_t buf_len, hpa_type *addr_type)
{
	struct virtio_memory_regions_hpa *region;
	uint32_t regionidx;
	uint64_t vhost_pa = 0;

	*addr_type = PHYS_ADDR_INVALID;

	for (regionidx = 0; regionidx < vdev->nregions_hpa; regionidx++) {
		region = &vdev->regions_hpa[regionidx];
		if ((guest_pa >= region->guest_phys_address) &&
			(guest_pa <= region->guest_phys_address_end)) {
			vhost_pa = region->host_phys_addr_offset + guest_pa;
			if (likely((guest_pa + buf_len - 1)
				<= region->guest_phys_address_end))
				*addr_type = PHYS_ADDR_CONTINUOUS;
			else
				*addr_type = PHYS_ADDR_CROSS_SUBREG;
			break;
		}
	}

	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") GPA %p| HPA %p\n",
		vdev->dev->device_fh, (void *)(uintptr_t)guest_pa,
		(void *)(uintptr_t)vhost_pa);

	return vhost_pa;
}
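/*
 * Typical usage (as in attach_rxmbuf_zcp() below): translate a guest buffer
 * address and check the returned address type before trusting the result:
 *
 *	hpa_type addr_type;
 *	uint64_t hpa = gpa_to_hpa(vdev, desc->addr, desc->len, &addr_type);
 *	if (addr_type != PHYS_ADDR_CONTINUOUS)
 *		;	// reject: invalid, or crosses a sub-region boundary
 */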
/*
 * Compares a packet destination MAC address to a device MAC address.
 */
static inline int __attribute__((always_inline))
ether_addr_cmp(struct ether_addr *ea, struct ether_addr *eb)
{
	return (((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0);
}
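/*
 * MAC_ADDR_CMP masks the XOR of the two 64-bit loads down to the low 48
 * bits, so on a little-endian machine the two bytes that happen to follow
 * each 6-byte address in memory are ignored by the comparison.
 */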
/*
 * This function learns the MAC address of the device and registers this along with a
 * vlan tag to a VMDQ.
 */
static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct ether_hdr *pkt_hdr;
	struct virtio_net_data_ll *dev_ll;
	struct virtio_net *dev = vdev->dev;
	int i, ret;

	/* Learn MAC address of guest device from packet */
	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	dev_ll = ll_root_used;

	while (dev_ll != NULL) {
		if (ether_addr_cmp(&(pkt_hdr->s_addr), &dev_ll->vdev->mac_address)) {
			RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") WARNING: This device is using an existing MAC address and has not been registered.\n", dev->device_fh);
			return -1;
		}
		dev_ll = dev_ll->next;
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];

	/* vlan_tag currently uses the device_id. */
	vdev->vlan_tag = vlan_tags[dev->device_fh];

	/* Print out VMDQ registration info. */
	RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\n",
		dev->device_fh,
		vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
		vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
		vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
		vdev->vlan_tag);

	/* Register the MAC address. */
	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address, (uint32_t)dev->device_fh);
	if (ret)
		RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",
					dev->device_fh);

	/* Enable stripping of the vlan tag as we handle routing. */
	rte_eth_dev_set_vlan_strip_on_queue(ports[0], (uint16_t)vdev->vmdq_rx_q, 1);

	/* Set device as ready for RX. */
	vdev->ready = DEVICE_RX;

	return 0;
}
/*
 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
 * queue before disabling RX on the device.
 */
static inline void
unlink_vmdq(struct vhost_dev *vdev)
{
	unsigned i = 0;
	unsigned rx_count;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	if (vdev->ready == DEVICE_RX) {
		/* Clear MAC and VLAN settings */
		rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
		for (i = 0; i < 6; i++)
			vdev->mac_address.addr_bytes[i] = 0;

		vdev->vlan_tag = 0;

		/* Clear out the receive buffers */
		rx_count = rte_eth_rx_burst(ports[0],
			(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

		while (rx_count) {
			for (i = 0; i < rx_count; i++)
				rte_pktmbuf_free(pkts_burst[i]);

			rx_count = rte_eth_rx_burst(ports[0],
				(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
		}

		vdev->ready = DEVICE_MAC_LEARNING;
	}
}
/*
 * Check if the packet destination MAC address is for a local device. If so then put
 * the packet on that device's RX queue. If not then return.
 */
static inline int __attribute__((always_inline))
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct virtio_net_data_ll *dev_ll;
	struct ether_hdr *pkt_hdr;
	uint64_t ret = 0;
	struct virtio_net *dev = vdev->dev;
	struct virtio_net *tdev; /* destination virtio device */

	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	/* Get the used devices list */
	dev_ll = ll_root_used;

	while (dev_ll != NULL) {
		if ((dev_ll->vdev->ready == DEVICE_RX) && ether_addr_cmp(&(pkt_hdr->d_addr),
				&dev_ll->vdev->mac_address)) {

			/* Drop the packet if the TX packet is destined for the TX device. */
			if (dev_ll->vdev->dev->device_fh == dev->device_fh) {
				LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n",
					dev->device_fh);
				return 0;
			}
			tdev = dev_ll->vdev->dev;

			LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is local\n", tdev->device_fh);

			if (unlikely(dev_ll->vdev->remove)) {
				/* Drop the packet if the device is marked for removal */
				LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Device is marked for removal\n", tdev->device_fh);
			} else {
				/* Send the packet to the local virtio device */
				ret = rte_vhost_enqueue_burst(tdev, VIRTIO_RXQ, &m, 1);
				if (enable_stats) {
					rte_atomic64_add(
					&dev_statistics[tdev->device_fh].rx_total_atomic,
					1);
					rte_atomic64_add(
					&dev_statistics[tdev->device_fh].rx_atomic,
					ret);
					dev_statistics[tdev->device_fh].tx_total++;
					dev_statistics[tdev->device_fh].tx += ret;
				}
			}

			return 0;
		}
		dev_ll = dev_ll->next;
	}

	return -1;
}
/*
 * This function routes the TX packet to the correct interface. This may be a local device
 * or the physical port.
 */
static inline void __attribute__((always_inline))
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
	struct mbuf_table *tx_q;
	struct rte_mbuf **m_table;
	unsigned len, ret, offset = 0;
	const uint16_t lcore_id = rte_lcore_id();
	struct virtio_net_data_ll *dev_ll = ll_root_used;
	struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
	struct virtio_net *dev = vdev->dev;

	/* Check if destination is a local VM */
	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
		rte_pktmbuf_free(m);
		return;
	}

	if (vm2vm_mode == VM2VM_HARDWARE) {
		while (dev_ll != NULL) {
			if ((dev_ll->vdev->ready == DEVICE_RX)
				&& ether_addr_cmp(&(pkt_hdr->d_addr),
				&dev_ll->vdev->mac_address)) {
				/*
				 * Drop the packet if the TX packet is
				 * destined for the TX device.
				 */
				if (dev_ll->vdev->dev->device_fh == dev->device_fh) {
					LOG_DEBUG(VHOST_DATA,
					"(%"PRIu64") TX: Source and destination"
					" MAC addresses are the same. Dropping "
					"packet.\n",
					dev_ll->vdev->dev->device_fh);
					rte_pktmbuf_free(m);
					return;
				}

				/*
				 * HW vlan strip reduces the packet length by
				 * the length of the vlan tag, so the packet
				 * length must be restored by adding it back.
				 */
				offset = VLAN_HLEN;
				vlan_tag =
				vlan_tags[(uint16_t)dev_ll->vdev->dev->device_fh];

				LOG_DEBUG(VHOST_DATA,
				"(%"PRIu64") TX: pkt to local VM device id:"
				"(%"PRIu64") vlan tag: %d.\n",
				dev->device_fh, dev_ll->vdev->dev->device_fh,
				vlan_tag);

				break;
			}
			dev_ll = dev_ll->next;
		}
	}

	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh);

	/* Add packet to the port tx queue */
	tx_q = &lcore_tx_queue[lcore_id];
	len = tx_q->len;

	m->ol_flags = PKT_TX_VLAN_PKT;

	m->data_len += offset;
	m->pkt_len += offset;

	m->vlan_tci = vlan_tag;

	tx_q->m_table[len] = m;
	len++;
	if (enable_stats) {
		dev_statistics[dev->device_fh].tx_total++;
		dev_statistics[dev->device_fh].tx++;
	}

	if (unlikely(len == MAX_PKT_BURST)) {
		m_table = (struct rte_mbuf **)tx_q->m_table;
		ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id, m_table, (uint16_t) len);
		/* Free any buffers not handled by TX and update the port stats. */
		if (unlikely(ret < len)) {
			do {
				rte_pktmbuf_free(m_table[ret]);
			} while (++ret < len);
		}

		len = 0;
	}

	tx_q->len = len;
	return;
}
/*
 * This function is called by each data core. It handles all RX/TX registered with the
 * core. For TX the specific lcore linked list is used. For RX, MAC addresses are compared
 * with all devices in the main linked list.
 */
static int
switch_worker(__attribute__((unused)) void *arg)
{
	struct rte_mempool *mbuf_pool = arg;
	struct virtio_net *dev = NULL;
	struct vhost_dev *vdev = NULL;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct virtio_net_data_ll *dev_ll;
	struct mbuf_table *tx_q;
	volatile struct lcore_ll_info *lcore_ll;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
	uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
	unsigned ret, i;
	const uint16_t lcore_id = rte_lcore_id();
	const uint16_t num_cores = (uint16_t)rte_lcore_count();
	uint16_t rx_count = 0;
	uint16_t tx_count;
	uint32_t retry = 0;

	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
	lcore_ll = lcore_info[lcore_id].lcore_ll;
	prev_tsc = 0;

	tx_q = &lcore_tx_queue[lcore_id];
	for (i = 0; i < num_cores; i++) {
		if (lcore_ids[i] == lcore_id) {
			tx_q->txq_id = i;
			break;
		}
	}

	while (1) {
		cur_tsc = rte_rdtsc();
		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			if (tx_q->len) {
				LOG_DEBUG(VHOST_DATA, "TX queue drained after timeout with burst size %u\n", tx_q->len);

				/* Tx any packets in the queue */
				ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,
					(struct rte_mbuf **)tx_q->m_table,
					(uint16_t)tx_q->len);
				if (unlikely(ret < tx_q->len)) {
					do {
						rte_pktmbuf_free(tx_q->m_table[ret]);
					} while (++ret < tx_q->len);
				}

				tx_q->len = 0;
			}

			prev_tsc = cur_tsc;
		}

		rte_prefetch0(lcore_ll->ll_root_used);
		/*
		 * Inform the configuration core that we have exited the linked list and that no devices are
		 * in use if requested.
		 */
		if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
			lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;

		/*
		 * Process devices
		 */
		dev_ll = lcore_ll->ll_root_used;

		while (dev_ll != NULL) {
			/* Get virtio device ID */
			vdev = dev_ll->vdev;
			dev = vdev->dev;

			if (unlikely(vdev->remove)) {
				dev_ll = dev_ll->next;
				unlink_vmdq(vdev);
				vdev->ready = DEVICE_SAFE_REMOVE;
				continue;
			}
			if (likely(vdev->ready == DEVICE_RX)) {
				/* Handle guest RX */
				rx_count = rte_eth_rx_burst(ports[0],
					vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

				if (rx_count) {
					/*
					 * If retry is enabled and the queue is full then we wait and retry to avoid packet loss.
					 * Here MAX_PKT_BURST must be less than virtio queue size
					 */
					if (enable_retry && unlikely(rx_count > rte_vring_available_entries(dev, VIRTIO_RXQ))) {
						for (retry = 0; retry < burst_rx_retry_num; retry++) {
							rte_delay_us(burst_rx_delay_time);
							if (rx_count <= rte_vring_available_entries(dev, VIRTIO_RXQ))
								break;
						}
					}
					ret_count = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_burst, rx_count);
					if (enable_stats) {
						rte_atomic64_add(
						&dev_statistics[dev_ll->vdev->dev->device_fh].rx_total_atomic,
						rx_count);
						rte_atomic64_add(
						&dev_statistics[dev_ll->vdev->dev->device_fh].rx_atomic, ret_count);
					}
					while (likely(rx_count)) {
						rx_count--;
						rte_pktmbuf_free(pkts_burst[rx_count]);
					}
				}
			}

			if (likely(!vdev->remove)) {
				/* Handle guest TX */
				tx_count = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ, mbuf_pool, pkts_burst, MAX_PKT_BURST);
				/* If this is the first received packet we need to learn the MAC and setup VMDQ */
				if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) {
					if (vdev->remove || (link_vmdq(vdev, pkts_burst[0]) == -1)) {
						while (tx_count)
							rte_pktmbuf_free(pkts_burst[--tx_count]);
					}
				}
				while (tx_count)
					virtio_tx_route(vdev, pkts_burst[--tx_count], (uint16_t)dev->device_fh);
			}

			/* Move to the next device in the list */
			dev_ll = dev_ll->next;
		}
	}

	return 0;
}
/*
 * This function gets the available ring number for zero copy rx.
 * Only one thread will call this function for a particular virtio device,
 * so it is designed as a non-thread-safe function.
 */
static inline uint32_t __attribute__((always_inline))
get_available_ring_num_zcp(struct virtio_net *dev)
{
	struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_RXQ];
	uint16_t avail_idx;

	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
	return (uint32_t)(avail_idx - vq->last_used_idx_res);
}
/*
 * This function gets the available ring index for zero copy rx;
 * it will retry 'burst_rx_retry_num' times until it gets enough ring entries.
 * Only one thread will call this function for a particular virtio device,
 * so it is designed as a non-thread-safe function.
 */
static inline uint32_t __attribute__((always_inline))
get_available_ring_index_zcp(struct virtio_net *dev,
	uint16_t *res_base_idx, uint32_t count)
{
	struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_RXQ];
	uint16_t avail_idx;
	uint32_t retry = 0;
	uint16_t free_entries;

	*res_base_idx = vq->last_used_idx_res;
	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
	free_entries = (avail_idx - *res_base_idx);

	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") in get_available_ring_index_zcp: "
			"avail idx: %d, "
			"res base idx:%d, free entries:%d\n",
			dev->device_fh, avail_idx, *res_base_idx,
			free_entries);

	/*
	 * If retry is enabled and the queue is full then we wait
	 * and retry to avoid packet loss.
	 */
	if (enable_retry && unlikely(count > free_entries)) {
		for (retry = 0; retry < burst_rx_retry_num; retry++) {
			rte_delay_us(burst_rx_delay_time);
			avail_idx = *((volatile uint16_t *)&vq->avail->idx);
			free_entries = (avail_idx - *res_base_idx);
			if (count <= free_entries)
				break;
		}
	}

	/* Check that we have enough buffers. */
	if (unlikely(count > free_entries))
		count = free_entries;

	if (unlikely(count == 0)) {
		LOG_DEBUG(VHOST_DATA,
			"(%"PRIu64") Fail in get_available_ring_index_zcp: "
			"avail idx: %d, res base idx:%d, free entries:%d\n",
			dev->device_fh, avail_idx,
			*res_base_idx, free_entries);
		return 0;
	}

	vq->last_used_idx_res = *res_base_idx + count;

	return count;
}
/*
 * This function puts a descriptor back on to the used list.
 */
static inline void __attribute__((always_inline))
put_desc_to_used_list_zcp(struct vhost_virtqueue *vq, uint16_t desc_idx)
{
	uint16_t res_cur_idx = vq->last_used_idx;
	vq->used->ring[res_cur_idx & (vq->size - 1)].id = (uint32_t)desc_idx;
	vq->used->ring[res_cur_idx & (vq->size - 1)].len = 0;

	/* The ring entry must be visible before the used index is published. */
	rte_compiler_barrier();
	*(volatile uint16_t *)&vq->used->idx += 1;
	vq->last_used_idx += 1;

	/* Kick the guest if necessary. */
	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
		eventfd_write((int)vq->kickfd, 1);
}
/*
 * This function gets an available descriptor from the virtio vring and an
 * unattached mbuf from vpool->ring, and then attaches them together. It needs
 * to adjust the offset for buff_addr and phys_addr according to the PMD
 * implementation, otherwise the frame data may be put at the wrong location
 * in the mbuf.
 */
static inline void __attribute__((always_inline))
attach_rxmbuf_zcp(struct virtio_net *dev)
{
	uint16_t res_base_idx, desc_idx;
	uint64_t buff_addr, phys_addr;
	struct vhost_virtqueue *vq;
	struct vring_desc *desc;
	struct rte_mbuf *mbuf = NULL;
	struct vpool *vpool;
	hpa_type addr_type;
	struct vhost_dev *vdev = (struct vhost_dev *)dev->priv;

	vpool = &vpool_array[vdev->vmdq_rx_q];
	vq = dev->virtqueue[VIRTIO_RXQ];

	do {
		if (unlikely(get_available_ring_index_zcp(vdev->dev, &res_base_idx,
				1) != 1))
			return;
		desc_idx = vq->avail->ring[(res_base_idx) & (vq->size - 1)];

		desc = &vq->desc[desc_idx];
		if (desc->flags & VRING_DESC_F_NEXT) {
			desc = &vq->desc[desc->next];
			buff_addr = gpa_to_vva(dev, desc->addr);
			phys_addr = gpa_to_hpa(vdev, desc->addr, desc->len,
					&addr_type);
		} else {
			buff_addr = gpa_to_vva(dev,
					desc->addr + vq->vhost_hlen);
			phys_addr = gpa_to_hpa(vdev,
					desc->addr + vq->vhost_hlen,
					desc->len, &addr_type);
		}

		if (unlikely(addr_type == PHYS_ADDR_INVALID)) {
			RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Invalid frame buffer"
				" address found when attaching RX frame buffer"
				" address!\n", dev->device_fh);
			put_desc_to_used_list_zcp(vq, desc_idx);
			continue;
		}

		/*
		 * Check if the frame buffer address from guest crosses
		 * sub-region or not.
		 */
		if (unlikely(addr_type == PHYS_ADDR_CROSS_SUBREG)) {
			RTE_LOG(ERR, VHOST_DATA,
				"(%"PRIu64") Frame buffer address crossing a "
				"sub-region found when attaching RX frame "
				"buffer address!\n",
				dev->device_fh);
			put_desc_to_used_list_zcp(vq, desc_idx);
			continue;
		}
	} while (unlikely(phys_addr == 0));

	rte_ring_sc_dequeue(vpool->ring, (void **)&mbuf);
	if (unlikely(mbuf == NULL)) {
		LOG_DEBUG(VHOST_DATA,
			"(%"PRIu64") in attach_rxmbuf_zcp: "
			"ring_sc_dequeue fail.\n",
			dev->device_fh);
		put_desc_to_used_list_zcp(vq, desc_idx);
		return;
	}

	if (unlikely(vpool->buf_size > desc->len)) {
		LOG_DEBUG(VHOST_DATA,
			"(%"PRIu64") in attach_rxmbuf_zcp: frame buffer "
			"length(%d) of descriptor idx: %d less than room "
			"size required: %d\n",
			dev->device_fh, desc->len, desc_idx, vpool->buf_size);
		put_desc_to_used_list_zcp(vq, desc_idx);
		rte_ring_sp_enqueue(vpool->ring, (void *)mbuf);
		return;
	}

	mbuf->buf_addr = (void *)(uintptr_t)(buff_addr - RTE_PKTMBUF_HEADROOM);
	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
	mbuf->buf_physaddr = phys_addr - RTE_PKTMBUF_HEADROOM;
	mbuf->data_len = desc->len;
	MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;

	LOG_DEBUG(VHOST_DATA,
		"(%"PRIu64") in attach_rxmbuf_zcp: res base idx:%d, "
		"descriptor idx:%d\n",
		dev->device_fh, res_base_idx, desc_idx);

	/*
	 * Return the mbuf, now backed by the guest buffer, to the mempool;
	 * the PMD RX path allocates from this pool, so the NIC will DMA
	 * received frames straight into guest memory.
	 */
	__rte_mbuf_raw_free(mbuf);

	return;
}
/*
 * Detach an attached packet mbuf -
 *  - restore original mbuf address and length values.
 *  - reset pktmbuf data and data_len to their default values.
 *  All other fields of the given packet mbuf will be left intact.
 *
 * @param m
 *   The attached packet mbuf.
 */
static inline void pktmbuf_detach_zcp(struct rte_mbuf *m)
{
	const struct rte_mempool *mp = m->pool;
	void *buf = RTE_MBUF_TO_BADDR(m);
	uint32_t buf_ofs;
	uint32_t buf_len = mp->elt_size - sizeof(*m);
	m->buf_physaddr = rte_mempool_virt2phy(mp, m) + sizeof(*m);

	m->buf_addr = buf;
	m->buf_len = (uint16_t)buf_len;

	buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
			RTE_PKTMBUF_HEADROOM : m->buf_len;
	m->data_off = buf_ofs;

	m->data_len = 0;
}
/*
 * This function is called after packets have been transmitted. It fetches
 * each mbuf from vpool->pool, detaches it and puts it back into vpool->ring.
 * It also updates the used index and kicks the guest if necessary.
 */
static inline uint32_t __attribute__((always_inline))
txmbuf_clean_zcp(struct virtio_net *dev, struct vpool *vpool)
{
	struct rte_mbuf *mbuf;
	struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_TXQ];
	uint32_t used_idx = vq->last_used_idx & (vq->size - 1);
	uint32_t index = 0;
	uint32_t mbuf_count = rte_mempool_count(vpool->pool);

	LOG_DEBUG(VHOST_DATA,
		"(%"PRIu64") in txmbuf_clean_zcp: mbuf count in mempool before "
		"clean is: %d\n",
		dev->device_fh, mbuf_count);
	LOG_DEBUG(VHOST_DATA,
		"(%"PRIu64") in txmbuf_clean_zcp: mbuf count in ring before "
		"clean is: %d\n",
		dev->device_fh, rte_ring_count(vpool->ring));

	for (index = 0; index < mbuf_count; index++) {
		mbuf = __rte_mbuf_raw_alloc(vpool->pool);
		if (likely(RTE_MBUF_INDIRECT(mbuf)))
			pktmbuf_detach_zcp(mbuf);
		rte_ring_sp_enqueue(vpool->ring, mbuf);

		/* Update used index buffer information. */
		vq->used->ring[used_idx].id = MBUF_HEADROOM_UINT32(mbuf);
		vq->used->ring[used_idx].len = 0;

		used_idx = (used_idx + 1) & (vq->size - 1);
	}

	LOG_DEBUG(VHOST_DATA,
		"(%"PRIu64") in txmbuf_clean_zcp: mbuf count in mempool after "
		"clean is: %d\n",
		dev->device_fh, rte_mempool_count(vpool->pool));
	LOG_DEBUG(VHOST_DATA,
		"(%"PRIu64") in txmbuf_clean_zcp: mbuf count in ring after "
		"clean is: %d\n",
		dev->device_fh, rte_ring_count(vpool->ring));
	LOG_DEBUG(VHOST_DATA,
		"(%"PRIu64") in txmbuf_clean_zcp: before updated "
		"vq->last_used_idx:%d\n",
		dev->device_fh, vq->last_used_idx);

	vq->last_used_idx += mbuf_count;

	LOG_DEBUG(VHOST_DATA,
		"(%"PRIu64") in txmbuf_clean_zcp: after updated "
		"vq->last_used_idx:%d\n",
		dev->device_fh, vq->last_used_idx);

	rte_compiler_barrier();

	*(volatile uint16_t *)&vq->used->idx += mbuf_count;

	/* Kick guest if required. */
	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
		eventfd_write((int)vq->kickfd, 1);

	return 0;
}
/*
 * This function is called when a virtio device is destroyed.
 * It fetches each mbuf from vpool->pool, detaches it, and puts it back into
 * vpool->ring.
 */
static void mbuf_destroy_zcp(struct vpool *vpool)
{
	struct rte_mbuf *mbuf = NULL;
	uint32_t index, mbuf_count = rte_mempool_count(vpool->pool);

	LOG_DEBUG(VHOST_CONFIG,
		"in mbuf_destroy_zcp: mbuf count in mempool before "
		"mbuf_destroy_zcp is: %d\n",
		mbuf_count);
	LOG_DEBUG(VHOST_CONFIG,
		"in mbuf_destroy_zcp: mbuf count in ring before "
		"mbuf_destroy_zcp is : %d\n",
		rte_ring_count(vpool->ring));

	for (index = 0; index < mbuf_count; index++) {
		mbuf = __rte_mbuf_raw_alloc(vpool->pool);
		if (likely(mbuf != NULL)) {
			if (likely(RTE_MBUF_INDIRECT(mbuf)))
				pktmbuf_detach_zcp(mbuf);
			rte_ring_sp_enqueue(vpool->ring, (void *)mbuf);
		}
	}

	LOG_DEBUG(VHOST_CONFIG,
		"in mbuf_destroy_zcp: mbuf count in mempool after "
		"mbuf_destroy_zcp is: %d\n",
		rte_mempool_count(vpool->pool));
	LOG_DEBUG(VHOST_CONFIG,
		"in mbuf_destroy_zcp: mbuf count in ring after "
		"mbuf_destroy_zcp is : %d\n",
		rte_ring_count(vpool->ring));
}
/*
 * This function places received packets on the guest RX virtqueue: it fills
 * in the used ring and the virtio headers, updates the used index and kicks
 * the guest if necessary.
 */
static inline uint32_t __attribute__((always_inline))
virtio_dev_rx_zcp(struct virtio_net *dev, struct rte_mbuf **pkts,
	uint32_t count)
{
	struct vhost_virtqueue *vq;
	struct vring_desc *desc;
	struct rte_mbuf *buff;
	/* The virtio_hdr is initialised to 0. */
	struct virtio_net_hdr_mrg_rxbuf virtio_hdr
		= {{0, 0, 0, 0, 0, 0}, 0};
	uint64_t buff_hdr_addr = 0;
	uint32_t head[MAX_PKT_BURST], packet_len = 0;
	uint32_t head_idx, packet_success = 0;
	uint16_t res_cur_idx;

	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);

	if (count == 0)
		return 0;

	vq = dev->virtqueue[VIRTIO_RXQ];
	count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;

	res_cur_idx = vq->last_used_idx;
	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
		dev->device_fh, res_cur_idx, res_cur_idx + count);

	/* Retrieve all of the head indexes first to avoid caching issues. */
	for (head_idx = 0; head_idx < count; head_idx++)
		head[head_idx] = MBUF_HEADROOM_UINT32(pkts[head_idx]);

	/* Prefetch descriptor index. */
	rte_prefetch0(&vq->desc[head[packet_success]]);

	while (packet_success != count) {
		/* Get descriptor from available ring */
		desc = &vq->desc[head[packet_success]];

		buff = pkts[packet_success];
		LOG_DEBUG(VHOST_DATA,
			"(%"PRIu64") in dev_rx_zcp: update the used idx for "
			"pkt[%d] descriptor idx: %d\n",
			dev->device_fh, packet_success,
			MBUF_HEADROOM_UINT32(buff));

		PRINT_PACKET(dev,
			(uintptr_t)(((uint64_t)(uintptr_t)buff->buf_addr)
			+ RTE_PKTMBUF_HEADROOM),
			rte_pktmbuf_data_len(buff), 0);

		/* Buffer address translation for virtio header. */
		buff_hdr_addr = gpa_to_vva(dev, desc->addr);
		packet_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen;

		/*
		 * If the descriptors are chained the header and data are
		 * placed in separate buffers.
		 */
		if (desc->flags & VRING_DESC_F_NEXT) {
			desc->len = vq->vhost_hlen;
			desc = &vq->desc[desc->next];
			desc->len = rte_pktmbuf_data_len(buff);
		} else {
			desc->len = packet_len;
		}

		/* Update used ring with desc information */
		vq->used->ring[res_cur_idx & (vq->size - 1)].id
			= head[packet_success];
		vq->used->ring[res_cur_idx & (vq->size - 1)].len
			= packet_len;
		res_cur_idx++;
		packet_success++;

		/* A header is required per buffer. */
		rte_memcpy((void *)(uintptr_t)buff_hdr_addr,
			(const void *)&virtio_hdr, vq->vhost_hlen);

		PRINT_PACKET(dev, (uintptr_t)buff_hdr_addr, vq->vhost_hlen, 1);

		if (likely(packet_success < count)) {
			/* Prefetch descriptor index. */
			rte_prefetch0(&vq->desc[head[packet_success]]);
		}
	}

	rte_compiler_barrier();

	LOG_DEBUG(VHOST_DATA,
		"(%"PRIu64") in dev_rx_zcp: before update used idx: "
		"vq.last_used_idx: %d, vq->used->idx: %d\n",
		dev->device_fh, vq->last_used_idx, vq->used->idx);

	*(volatile uint16_t *)&vq->used->idx += count;
	vq->last_used_idx += count;

	LOG_DEBUG(VHOST_DATA,
		"(%"PRIu64") in dev_rx_zcp: after update used idx: "
		"vq.last_used_idx: %d, vq->used->idx: %d\n",
		dev->device_fh, vq->last_used_idx, vq->used->idx);

	/* Kick the guest if necessary. */
	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
		eventfd_write((int)vq->kickfd, 1);

	return count;
}
/*
 * This function routes the TX packet to the correct interface.
 * This may be a local device or the physical port.
 */
static inline void __attribute__((always_inline))
virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m,
	uint32_t desc_idx, uint8_t need_copy)
{
	struct mbuf_table *tx_q;
	struct rte_mbuf **m_table;
	struct rte_mbuf *mbuf = NULL;
	unsigned len, ret, offset = 0;
	struct vpool *vpool;
	struct virtio_net_data_ll *dev_ll = ll_root_used;
	struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
	uint16_t vlan_tag = (uint16_t)vlan_tags[(uint16_t)dev->device_fh];
	uint16_t vmdq_rx_q = ((struct vhost_dev *)dev->priv)->vmdq_rx_q;

	/* Add packet to the port tx queue */
	tx_q = &tx_queue_zcp[vmdq_rx_q];
	len = tx_q->len;

	/* Allocate an mbuf and populate the structure. */
	vpool = &vpool_array[MAX_QUEUES + vmdq_rx_q];
	rte_ring_sc_dequeue(vpool->ring, (void **)&mbuf);
	if (unlikely(mbuf == NULL)) {
		struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_TXQ];
		RTE_LOG(ERR, VHOST_DATA,
			"(%"PRIu64") Failed to allocate memory for mbuf.\n",
			dev->device_fh);
		put_desc_to_used_list_zcp(vq, desc_idx);
		return;
	}

	if (vm2vm_mode == VM2VM_HARDWARE) {
		/* Avoid using a vlan tag from any vm for an external pkt, such
		 * as vlan_tags[dev->device_fh]; otherwise it conflicts with
		 * pool selection: the MAC address identifies it as an external
		 * pkt that should go to the network, while the vlan tag
		 * identifies it as a vm2vm pkt that should be forwarded to
		 * another vm. The hardware cannot resolve such an ambiguous
		 * situation, so the pkt would be lost.
		 */
		vlan_tag = external_pkt_default_vlan_tag;
		while (dev_ll != NULL) {
			if (likely(dev_ll->vdev->ready == DEVICE_RX) &&
				ether_addr_cmp(&(pkt_hdr->d_addr),
				&dev_ll->vdev->mac_address)) {

				/*
				 * Drop the packet if the TX packet is destined
				 * for the TX device.
				 */
				if (unlikely(dev_ll->vdev->dev->device_fh
					== dev->device_fh)) {
					LOG_DEBUG(VHOST_DATA,
					"(%"PRIu64") TX: Source and destination"
					" MAC addresses are the same. Dropping "
					"packet.\n",
					dev_ll->vdev->dev->device_fh);
					MBUF_HEADROOM_UINT32(mbuf)
						= (uint32_t)desc_idx;
					__rte_mbuf_raw_free(mbuf);
					return;
				}

				/*
				 * Packet length offset 4 bytes for HW vlan
				 * strip when L2 switch back.
				 */
				offset = VLAN_HLEN;
				vlan_tag =
				vlan_tags[(uint16_t)dev_ll->vdev->dev->device_fh];

				LOG_DEBUG(VHOST_DATA,
				"(%"PRIu64") TX: pkt to local VM device id:"
				"(%"PRIu64") vlan tag: %d.\n",
				dev->device_fh, dev_ll->vdev->dev->device_fh,
				vlan_tag);

				break;
			}
			dev_ll = dev_ll->next;
		}
	}

	mbuf->nb_segs = m->nb_segs;
	mbuf->next = m->next;
	mbuf->data_len = m->data_len + offset;
	mbuf->pkt_len = mbuf->data_len;
	if (unlikely(need_copy)) {
		/* Copy the packet contents to the mbuf. */
		rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
			rte_pktmbuf_mtod(m, void *),
			m->data_len);
	} else {
		mbuf->data_off = m->data_off;
		mbuf->buf_physaddr = m->buf_physaddr;
		mbuf->buf_addr = m->buf_addr;
	}
	mbuf->ol_flags = PKT_TX_VLAN_PKT;
	mbuf->vlan_tci = vlan_tag;
	mbuf->l2_len = sizeof(struct ether_hdr);
	mbuf->l3_len = sizeof(struct ipv4_hdr);
	MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;

	tx_q->m_table[len] = mbuf;
	len++;

	LOG_DEBUG(VHOST_DATA,
		"(%"PRIu64") in tx_route_zcp: pkt: nb_seg: %d, next:%s\n",
		dev->device_fh,
		mbuf->nb_segs,
		(mbuf->next == NULL) ? "null" : "non-null");

	if (enable_stats) {
		dev_statistics[dev->device_fh].tx_total++;
		dev_statistics[dev->device_fh].tx++;
	}

	if (unlikely(len == MAX_PKT_BURST)) {
		m_table = (struct rte_mbuf **)tx_q->m_table;
		ret = rte_eth_tx_burst(ports[0],
			(uint16_t)tx_q->txq_id, m_table, (uint16_t) len);

		/*
		 * Free any buffers not handled by TX and update
		 * the port stats.
		 */
		if (unlikely(ret < len)) {
			do {
				rte_pktmbuf_free(m_table[ret]);
			} while (++ret < len);
		}

		len = 0;
		txmbuf_clean_zcp(dev, vpool);
	}

	tx_q->len = len;

	return;
}
/*
 * This function TXes all available packets in the virtio TX queue for one
 * virtio-net device. If it is the first packet, it learns the MAC address
 * and sets up the VMDQ.
 */
static inline void __attribute__((always_inline))
virtio_dev_tx_zcp(struct virtio_net *dev)
{
	struct rte_mbuf m;
	struct vhost_virtqueue *vq;
	struct vring_desc *desc;
	uint64_t buff_addr = 0, phys_addr;
	uint32_t head[MAX_PKT_BURST];
	uint32_t i;
	uint16_t free_entries, packet_success = 0;
	uint16_t avail_idx;
	uint8_t need_copy = 0;
	hpa_type addr_type;
	struct vhost_dev *vdev = (struct vhost_dev *)dev->priv;

	vq = dev->virtqueue[VIRTIO_TXQ];
	avail_idx = *((volatile uint16_t *)&vq->avail->idx);

	/* If there are no available buffers then return. */
	if (vq->last_used_idx_res == avail_idx)
		return;

	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);

	/* Prefetch available ring to retrieve head indexes. */
	rte_prefetch0(&vq->avail->ring[vq->last_used_idx_res & (vq->size - 1)]);

	/* Get the number of free entries in the ring */
	free_entries = (avail_idx - vq->last_used_idx_res);

	/* Limit to MAX_PKT_BURST. */
	free_entries
		= (free_entries > MAX_PKT_BURST) ? MAX_PKT_BURST : free_entries;

	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n",
		dev->device_fh, free_entries);

	/* Retrieve all of the head indexes first to avoid caching issues. */
	for (i = 0; i < free_entries; i++)
		head[i]
			= vq->avail->ring[(vq->last_used_idx_res + i)
			& (vq->size - 1)];

	vq->last_used_idx_res += free_entries;

	/* Prefetch descriptor index. */
	rte_prefetch0(&vq->desc[head[packet_success]]);
	rte_prefetch0(&vq->used->ring[vq->last_used_idx & (vq->size - 1)]);

	while (packet_success < free_entries) {
		desc = &vq->desc[head[packet_success]];

		/* Discard first buffer as it is the virtio header */
		desc = &vq->desc[desc->next];

		/* Buffer address translation. */
		buff_addr = gpa_to_vva(dev, desc->addr);
		phys_addr = gpa_to_hpa(vdev, desc->addr, desc->len, &addr_type);

		if (likely(packet_success < (free_entries - 1)))
			/* Prefetch descriptor index. */
			rte_prefetch0(&vq->desc[head[packet_success + 1]]);

		if (unlikely(addr_type == PHYS_ADDR_INVALID)) {
			RTE_LOG(ERR, VHOST_DATA,
				"(%"PRIu64") Invalid frame buffer address found "
				"when TX packets!\n",
				dev->device_fh);
			packet_success++;
			continue;
		}

		/* Prefetch buffer address. */
		rte_prefetch0((void *)(uintptr_t)buff_addr);

		/*
		 * Setup dummy mbuf. This is copied to a real mbuf if
		 * transmitted out the physical port.
		 */
		m.data_len = desc->len;
		m.data_off = 0;
		m.nb_segs = 1;
		m.next = NULL;
		m.buf_addr = (void *)(uintptr_t)buff_addr;
		m.buf_physaddr = phys_addr;

		/*
		 * Check if the frame buffer address from guest crosses
		 * sub-region or not.
		 */
		if (unlikely(addr_type == PHYS_ADDR_CROSS_SUBREG)) {
			RTE_LOG(ERR, VHOST_DATA,
				"(%"PRIu64") Frame buffer address crossing a "
				"sub-region found when attaching TX frame "
				"buffer address!\n",
				dev->device_fh);
			need_copy = 1;
		} else
			need_copy = 0;

		PRINT_PACKET(dev, (uintptr_t)buff_addr, desc->len, 0);

		/*
		 * If this is the first received packet we need to learn
		 * the MAC and setup VMDQ.
		 */
		if (unlikely(vdev->ready == DEVICE_MAC_LEARNING)) {
			if (vdev->remove || (link_vmdq(vdev, &m) == -1)) {
				/*
				 * Discard frame if device is scheduled for
				 * removal or a duplicate MAC address is found.
				 */
				packet_success += free_entries;
				vq->last_used_idx += packet_success;
				break;
			}
		}

		virtio_tx_route_zcp(dev, &m, head[packet_success], need_copy);
		packet_success++;
	}
}
1989 * This function is called by each data core. It handles all RX/TX registered
1990 * with the core. For TX the specific lcore linked list is used. For RX, MAC
1991 * addresses are compared with all devices in the main linked list.
1994 switch_worker_zcp(__attribute__((unused)) void *arg)
1996 struct virtio_net *dev = NULL;
1997 struct vhost_dev *vdev = NULL;
1998 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1999 struct virtio_net_data_ll *dev_ll;
2000 struct mbuf_table *tx_q;
2001 volatile struct lcore_ll_info *lcore_ll;
2002 const uint64_t drain_tsc
2003 = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S
2004 * BURST_TX_DRAIN_US;
2005 uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
2007 const uint16_t lcore_id = rte_lcore_id();
2008 uint16_t count_in_ring, rx_count = 0;
2010 RTE_LOG(INFO, VHOST_DATA, "Procesing on Core %u started\n", lcore_id);
2012 lcore_ll = lcore_info[lcore_id].lcore_ll;
2016 cur_tsc = rte_rdtsc();
2018 /* TX burst queue drain */
2019 diff_tsc = cur_tsc - prev_tsc;
2020 if (unlikely(diff_tsc > drain_tsc)) {
2022 * Get mbuf from vpool.pool and detach mbuf and
2023 * put back into vpool.ring.
2025 dev_ll = lcore_ll->ll_root_used;
2026 while ((dev_ll != NULL) && (dev_ll->vdev != NULL)) {
2027 /* Get virtio device ID */
2028 vdev = dev_ll->vdev;
2031 if (likely(!vdev->remove)) {
2032 tx_q = &tx_queue_zcp[(uint16_t)vdev->vmdq_rx_q];
2034 LOG_DEBUG(VHOST_DATA,
2035 "TX queue drained after timeout"
2036 " with burst size %u\n",
2040 * Tx any packets in the queue
2042 ret = rte_eth_tx_burst(
2044 (uint16_t)tx_q->txq_id,
2045 (struct rte_mbuf **)
2047 (uint16_t)tx_q->len);
2048 if (unlikely(ret < tx_q->len)) {
2051 tx_q->m_table[ret]);
2052 } while (++ret < tx_q->len);
2056 txmbuf_clean_zcp(dev,
2057 &vpool_array[MAX_QUEUES+vdev->vmdq_rx_q]);
2060 dev_ll = dev_ll->next;
2065 rte_prefetch0(lcore_ll->ll_root_used);
2068 * Inform the configuration core that we have exited the linked
2069 * list and that no devices are in use if requested.
2071 if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
2072 lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
2074 /* Process devices */
2075 dev_ll = lcore_ll->ll_root_used;
2077 while ((dev_ll != NULL) && (dev_ll->vdev != NULL)) {
2078 vdev = dev_ll->vdev;
2080 if (unlikely(vdev->remove)) {
2081 dev_ll = dev_ll->next;
2083 vdev->ready = DEVICE_SAFE_REMOVE;
2087 if (likely(vdev->ready == DEVICE_RX)) {
2088 uint32_t index = vdev->vmdq_rx_q;
2091 = rte_ring_count(vpool_array[index].ring);
2092 uint16_t free_entries
2093 = (uint16_t)get_available_ring_num_zcp(dev);
2096 * Attach all mbufs in vpool.ring and put them back into vpool.pool.
2100 i < RTE_MIN(free_entries,
2101 RTE_MIN(count_in_ring, MAX_PKT_BURST));
2103 attach_rxmbuf_zcp(dev);
2105 /* Handle guest RX */
2106 rx_count = rte_eth_rx_burst(ports[0],
2107 vdev->vmdq_rx_q, pkts_burst,
2111 ret_count = virtio_dev_rx_zcp(dev,
2112 pkts_burst, rx_count);
2114 dev_statistics[dev->device_fh].rx_total
2116 dev_statistics[dev->device_fh].rx
2119 while (likely(rx_count)) {
2122 pkts_burst[rx_count]);
2123 rte_ring_sp_enqueue(
2124 vpool_array[index].ring,
2125 (void *)pkts_burst[rx_count]);
2130 if (likely(!vdev->remove))
2131 /* Handle guest TX */
2132 virtio_dev_tx_zcp(dev);
2134 /* Move to the next device in the list */
2135 dev_ll = dev_ll->next;
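/*
 * Editor's illustrative sketch (not part of the original example): how
 * the drain_tsc constant above turns the BURST_TX_DRAIN_US budget into
 * TSC cycles. The cycles-per-microsecond figure is rounded up before
 * scaling so that short drain periods are never truncated to zero.
 */
static inline uint64_t
calc_drain_tsc_sketch(uint64_t drain_us)
{
	/* ceil(tsc_hz / US_PER_S) cycles per microsecond, then scale. */
	return ((rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S) * drain_us;
}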
2144 * Add an entry to a used linked list. A free entry must first be found
2145 * in the free linked list using get_data_ll_free_entry();
2148 add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
2149 struct virtio_net_data_ll *ll_dev)
2151 struct virtio_net_data_ll *ll = *ll_root_addr;
2153 /* Set next as NULL and use a compiler barrier to avoid reordering. */
2154 ll_dev->next = NULL;
2155 rte_compiler_barrier();
2157 /* If ll == NULL then this is the first device. */
2159 /* Increment to the tail of the linked list. */
2160 while (ll->next != NULL)
2165 *ll_root_addr = ll_dev;
2170 * Remove an entry from a used linked list. The entry must then be added to
2171 * the free linked list using put_data_ll_free_entry().
2174 rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
2175 struct virtio_net_data_ll *ll_dev,
2176 struct virtio_net_data_ll *ll_dev_last)
2178 struct virtio_net_data_ll *ll = *ll_root_addr;
2180 if (unlikely((ll == NULL) || (ll_dev == NULL)))
2184 *ll_root_addr = ll_dev->next;
2186 if (likely(ll_dev_last != NULL))
2187 ll_dev_last->next = ll_dev->next;
2189 RTE_LOG(ERR, VHOST_CONFIG, "Remove entry from ll failed.\n");
2193 * Find and return an entry from the free linked list.
2195 static struct virtio_net_data_ll *
2196 get_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr)
2198 struct virtio_net_data_ll *ll_free = *ll_root_addr;
2199 struct virtio_net_data_ll *ll_dev;
2201 if (ll_free == NULL)
2205 *ll_root_addr = ll_free->next;
2211 * Place an entry back onto the free linked list.
2214 put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr,
2215 struct virtio_net_data_ll *ll_dev)
2217 struct virtio_net_data_ll *ll_free = *ll_root_addr;
2222 ll_dev->next = ll_free;
2223 *ll_root_addr = ll_dev;
2227 * Creates a linked list of a given size.
2229 static struct virtio_net_data_ll *
2230 alloc_data_ll(uint32_t size)
2232 struct virtio_net_data_ll *ll_new;
2235 /* Malloc and then chain the linked list. */
2236 ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
2237 if (ll_new == NULL) {
2238 RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for ll_new.\n");
2242 for (i = 0; i < size - 1; i++) {
2243 ll_new[i].vdev = NULL;
2244 ll_new[i].next = &ll_new[i+1];
2246 ll_new[i].next = NULL;
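/*
 * Editor's illustrative sketch (not part of the original example): the
 * intended lifecycle of a linked-list entry using the helpers above.
 * Claim a free entry, attach it to the used list, and return it when
 * done. Passing NULL as ll_dev_last is only valid when the entry is the
 * head of the used list, as in this simplified flow.
 */
static void
ll_entry_lifecycle_sketch(struct vhost_dev *vdev)
{
	struct virtio_net_data_ll *ll_dev;

	ll_dev = get_data_ll_free_entry(&ll_root_free);
	if (ll_dev == NULL)
		return;	/* Free list exhausted. */

	ll_dev->vdev = vdev;
	add_data_ll_entry(&ll_root_used, ll_dev);

	/* ... device serviced by a data core ... */

	rm_data_ll_entry(&ll_root_used, ll_dev, NULL);
	put_data_ll_free_entry(&ll_root_free, ll_dev);
}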
2252 * Create the main linked list along with each individual core's linked list. A used and a free list
2253 * are created to manage entries.
2260 RTE_LCORE_FOREACH_SLAVE(lcore) {
2261 lcore_info[lcore].lcore_ll = malloc(sizeof(struct lcore_ll_info));
2262 if (lcore_info[lcore].lcore_ll == NULL) {
2263 RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for lcore_ll.\n");
2267 lcore_info[lcore].lcore_ll->device_num = 0;
2268 lcore_info[lcore].lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
2269 lcore_info[lcore].lcore_ll->ll_root_used = NULL;
2270 if (num_devices % num_switching_cores)
2271 lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll((num_devices / num_switching_cores) + 1);
2273 lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll(num_devices / num_switching_cores);
2276 /* Allocate devices up to a maximum of MAX_DEVICES. */
2277 ll_root_free = alloc_data_ll(MIN((num_devices), MAX_DEVICES));
2283 * Remove a device from the specific data core linked list and from the main linked list. Synchronization
2284 * occurs through the use of the lcore dev_removal_flag. The device is made volatile here to avoid re-ordering
2285 * of dev->remove=1, which can cause an infinite loop in the rte_pause loop.
2288 destroy_device (volatile struct virtio_net *dev)
2290 struct virtio_net_data_ll *ll_lcore_dev_cur;
2291 struct virtio_net_data_ll *ll_main_dev_cur;
2292 struct virtio_net_data_ll *ll_lcore_dev_last = NULL;
2293 struct virtio_net_data_ll *ll_main_dev_last = NULL;
2294 struct vhost_dev *vdev;
2297 dev->flags &= ~VIRTIO_DEV_RUNNING;
2299 vdev = (struct vhost_dev *)dev->priv;
2300 /* Set the remove flag. */
2302 while (vdev->ready != DEVICE_SAFE_REMOVE) {
2306 /* Search for entry to be removed from lcore ll */
2307 ll_lcore_dev_cur = lcore_info[vdev->coreid].lcore_ll->ll_root_used;
2308 while (ll_lcore_dev_cur != NULL) {
2309 if (ll_lcore_dev_cur->vdev == vdev) {
2312 ll_lcore_dev_last = ll_lcore_dev_cur;
2313 ll_lcore_dev_cur = ll_lcore_dev_cur->next;
2317 if (ll_lcore_dev_cur == NULL) {
2318 RTE_LOG(ERR, VHOST_CONFIG,
2319 "(%"PRIu64") Failed to find the dev to be destroy.\n",
2324 /* Search for entry to be removed from main ll */
2325 ll_main_dev_cur = ll_root_used;
2326 ll_main_dev_last = NULL;
2327 while (ll_main_dev_cur != NULL) {
2328 if (ll_main_dev_cur->vdev == vdev) {
2331 ll_main_dev_last = ll_main_dev_cur;
2332 ll_main_dev_cur = ll_main_dev_cur->next;
2336 /* Remove entries from the lcore and main ll. */
2337 rm_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used, ll_lcore_dev_cur, ll_lcore_dev_last);
2338 rm_data_ll_entry(&ll_root_used, ll_main_dev_cur, ll_main_dev_last);
2340 /* Set the dev_removal_flag on each lcore. */
2341 RTE_LCORE_FOREACH_SLAVE(lcore) {
2342 lcore_info[lcore].lcore_ll->dev_removal_flag = REQUEST_DEV_REMOVAL;
2346 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL we can be sure that
2347 * they can no longer access the device removed from the linked lists and that the devices
2348 * are no longer in use.
2350 RTE_LCORE_FOREACH_SLAVE(lcore) {
2351 while (lcore_info[lcore].lcore_ll->dev_removal_flag != ACK_DEV_REMOVAL) {
2356 /* Add the entries back to the lcore and main free ll.*/
2357 put_data_ll_free_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_free, ll_lcore_dev_cur);
2358 put_data_ll_free_entry(&ll_root_free, ll_main_dev_cur);
2360 /* Decrement number of device on the lcore. */
2361 lcore_info[vdev->coreid].lcore_ll->device_num--;
2363 RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been removed from data core\n", dev->device_fh);
2366 struct vpool *vpool = &vpool_array[vdev->vmdq_rx_q];
2368 /* Stop the RX queue. */
2369 if (rte_eth_dev_rx_queue_stop(ports[0], vdev->vmdq_rx_q) != 0) {
2370 LOG_DEBUG(VHOST_CONFIG,
2371 "(%"PRIu64") In destroy_device: Failed to stop "
2377 LOG_DEBUG(VHOST_CONFIG,
2378 "(%"PRIu64") in destroy_device: Start put mbuf in "
2379 "mempool back to ring for RX queue: %d\n",
2380 dev->device_fh, vdev->vmdq_rx_q);
2382 mbuf_destroy_zcp(vpool);
2384 /* Stop the TX queue. */
2385 if (rte_eth_dev_tx_queue_stop(ports[0], vdev->vmdq_rx_q) != 0) {
2386 LOG_DEBUG(VHOST_CONFIG,
2387 "(%"PRIu64") In destroy_device: Failed to "
2388 "stop tx queue:%d\n",
2389 dev->device_fh, vdev->vmdq_rx_q);
2392 vpool = &vpool_array[vdev->vmdq_rx_q + MAX_QUEUES];
2394 LOG_DEBUG(VHOST_CONFIG,
2395 "(%"PRIu64") destroy_device: Start put mbuf in mempool "
2396 "back to ring for TX queue: %d, dev:(%"PRIu64")\n",
2397 dev->device_fh, (vdev->vmdq_rx_q + MAX_QUEUES),
2400 mbuf_destroy_zcp(vpool);
2401 rte_free(vdev->regions_hpa);
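/*
 * Editor's illustrative sketch (not part of the original example): the
 * dev_removal_flag handshake performed in destroy_device() above. The
 * configuration core raises a request on every worker core and then
 * spins until each worker, once safely outside the linked list,
 * acknowledges.
 */
static void
wait_for_removal_ack_sketch(void)
{
	int lcore;

	/* Request: workers see this at the top of their main loop. */
	RTE_LCORE_FOREACH_SLAVE(lcore)
		lcore_info[lcore].lcore_ll->dev_removal_flag =
			REQUEST_DEV_REMOVAL;

	/* Wait until every worker has acknowledged. */
	RTE_LCORE_FOREACH_SLAVE(lcore)
		while (lcore_info[lcore].lcore_ll->dev_removal_flag !=
				ACK_DEV_REMOVAL)
			rte_pause();
}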
2408 * Calculate the number of physically contiguous sub-regions within one
2409 * particular region whose vhost virtual address is contiguous. The region
2410 * starts at vva_start, with a size of 'size' bytes.
2413 check_hpa_regions(uint64_t vva_start, uint64_t size)
2415 uint32_t i, nregions = 0, page_size = getpagesize();
2416 uint64_t cur_phys_addr = 0, next_phys_addr = 0;
2417 if (vva_start % page_size) {
2418 LOG_DEBUG(VHOST_CONFIG,
2419 "in check_countinous: vva start(%p) mod page_size(%d) "
2421 (void *)(uintptr_t)vva_start, page_size);
2424 if (size % page_size) {
2425 LOG_DEBUG(VHOST_CONFIG,
2426 "in check_countinous: "
2427 "size((%"PRIu64")) mod page_size(%d) has remainder\n",
2431 for (i = 0; i < size - page_size; i = i + page_size) {
2433 = rte_mem_virt2phy((void *)(uintptr_t)(vva_start + i));
2434 next_phys_addr = rte_mem_virt2phy(
2435 (void *)(uintptr_t)(vva_start + i + page_size));
2436 if ((cur_phys_addr + page_size) != next_phys_addr) {
2438 LOG_DEBUG(VHOST_CONFIG,
2439 "in check_continuous: hva addr:(%p) is not "
2440 "continuous with hva addr:(%p), diff:%d\n",
2441 (void *)(uintptr_t)(vva_start + (uint64_t)i),
2442 (void *)(uintptr_t)(vva_start + (uint64_t)i
2443 + page_size), page_size);
2444 LOG_DEBUG(VHOST_CONFIG,
2445 "in check_continuous: hpa addr:(%p) is not "
2446 "continuous with hpa addr:(%p), "
2447 "diff:(%"PRIu64")\n",
2448 (void *)(uintptr_t)cur_phys_addr,
2449 (void *)(uintptr_t)next_phys_addr,
2450 (next_phys_addr-cur_phys_addr));
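/*
 * Editor's illustrative sketch (not part of the original example): the
 * page-walk test used by check_hpa_regions() reduced to one predicate.
 * Two virtually adjacent pages belong to the same sub-region only if
 * their host physical addresses are also adjacent.
 */
static inline int
pages_phys_contiguous_sketch(uint64_t vva, uint32_t page_size)
{
	uint64_t cur = rte_mem_virt2phy((void *)(uintptr_t)vva);
	uint64_t next = rte_mem_virt2phy(
			(void *)(uintptr_t)(vva + page_size));

	return (cur + page_size) == next;
}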
2457 * Divide each region whose vhost virtual address is contiguous into
2458 * sub-regions, making sure the physical addresses within each sub-region
2459 * are contiguous, and fill the offset (to the GPA), size, and other
2460 * information of each sub-region into regions_hpa.
2463 fill_hpa_memory_regions(struct virtio_memory_regions_hpa *mem_region_hpa, struct virtio_memory *virtio_memory)
2465 uint32_t regionidx, regionidx_hpa = 0, i, k, page_size = getpagesize();
2466 uint64_t cur_phys_addr = 0, next_phys_addr = 0, vva_start;
2468 if (mem_region_hpa == NULL)
2471 for (regionidx = 0; regionidx < virtio_memory->nregions; regionidx++) {
2472 vva_start = virtio_memory->regions[regionidx].guest_phys_address +
2473 virtio_memory->regions[regionidx].address_offset;
2474 mem_region_hpa[regionidx_hpa].guest_phys_address
2475 = virtio_memory->regions[regionidx].guest_phys_address;
2476 mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
2477 rte_mem_virt2phy((void *)(uintptr_t)(vva_start)) -
2478 mem_region_hpa[regionidx_hpa].guest_phys_address;
2479 LOG_DEBUG(VHOST_CONFIG,
2480 "in fill_hpa_regions: guest phys addr start[%d]:(%p)\n",
2483 (mem_region_hpa[regionidx_hpa].guest_phys_address));
2484 LOG_DEBUG(VHOST_CONFIG,
2485 "in fill_hpa_regions: host phys addr start[%d]:(%p)\n",
2488 (mem_region_hpa[regionidx_hpa].host_phys_addr_offset));
2490 i < virtio_memory->regions[regionidx].memory_size -
2493 cur_phys_addr = rte_mem_virt2phy(
2494 (void *)(uintptr_t)(vva_start + i));
2495 next_phys_addr = rte_mem_virt2phy(
2496 (void *)(uintptr_t)(vva_start +
2498 if ((cur_phys_addr + page_size) != next_phys_addr) {
2499 mem_region_hpa[regionidx_hpa].guest_phys_address_end =
2500 mem_region_hpa[regionidx_hpa].guest_phys_address +
2502 mem_region_hpa[regionidx_hpa].memory_size
2504 LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest "
2505 "phys addr end [%d]:(%p)\n",
2508 (mem_region_hpa[regionidx_hpa].guest_phys_address_end));
2509 LOG_DEBUG(VHOST_CONFIG,
2510 "in fill_hpa_regions: guest phys addr "
2514 (mem_region_hpa[regionidx_hpa].memory_size));
2515 mem_region_hpa[regionidx_hpa + 1].guest_phys_address
2516 = mem_region_hpa[regionidx_hpa].guest_phys_address_end;
2518 mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
2520 mem_region_hpa[regionidx_hpa].guest_phys_address;
2521 LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest"
2522 " phys addr start[%d]:(%p)\n",
2525 (mem_region_hpa[regionidx_hpa].guest_phys_address));
2526 LOG_DEBUG(VHOST_CONFIG,
2527 "in fill_hpa_regions: host phys addr "
2531 (mem_region_hpa[regionidx_hpa].host_phys_addr_offset));
2537 mem_region_hpa[regionidx_hpa].guest_phys_address_end
2538 = mem_region_hpa[regionidx_hpa].guest_phys_address
2540 mem_region_hpa[regionidx_hpa].memory_size = k + page_size;
2541 LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr end "
2542 "[%d]:(%p)\n", regionidx_hpa,
2544 (mem_region_hpa[regionidx_hpa].guest_phys_address_end));
2545 LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr size "
2546 "[%d]:(%p)\n", regionidx_hpa,
2548 (mem_region_hpa[regionidx_hpa].memory_size));
2551 return regionidx_hpa;
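/*
 * Editor's illustrative sketch (not part of the original example): once
 * regions_hpa has been filled, a guest physical address is translated
 * by locating the sub-region that contains it and adding the stored
 * host-physical-address offset. The helper name is hypothetical.
 */
static uint64_t
gpa_to_hpa_sketch(struct virtio_memory_regions_hpa *regions,
	uint32_t nregions, uint64_t guest_pa)
{
	uint32_t i;

	for (i = 0; i < nregions; i++) {
		if (guest_pa >= regions[i].guest_phys_address &&
			guest_pa < regions[i].guest_phys_address_end)
			return guest_pa + regions[i].host_phys_addr_offset;
	}

	return 0;	/* No containing sub-region found. */
}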
2555 * A new device is added to a data core. First the device is added to the main linked list
2556 * and then allocated to a specific data core.
2559 new_device (struct virtio_net *dev)
2561 struct virtio_net_data_ll *ll_dev;
2562 int lcore, core_add = 0;
2563 uint32_t device_num_min = num_devices;
2564 struct vhost_dev *vdev;
2567 vdev = rte_zmalloc("vhost device", sizeof(*vdev), CACHE_LINE_SIZE);
2569 RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Couldn't allocate memory for vhost dev\n",
2577 vdev->nregions_hpa = dev->mem->nregions;
2578 for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
2580 += check_hpa_regions(
2581 dev->mem->regions[regionidx].guest_phys_address
2582 + dev->mem->regions[regionidx].address_offset,
2583 dev->mem->regions[regionidx].memory_size);
2587 vdev->regions_hpa = (struct virtio_memory_regions_hpa *) rte_zmalloc("vhost hpa region",
2588 sizeof(struct virtio_memory_regions_hpa) * vdev->nregions_hpa,
2590 if (vdev->regions_hpa == NULL) {
2591 RTE_LOG(ERR, VHOST_CONFIG, "Cannot allocate memory for hpa region\n");
2597 if (fill_hpa_memory_regions(
2598 vdev->regions_hpa, dev->mem
2599 ) != vdev->nregions_hpa) {
2601 RTE_LOG(ERR, VHOST_CONFIG,
2602 "hpa memory regions number mismatch: "
2603 "[%d]\n", vdev->nregions_hpa);
2604 rte_free(vdev->regions_hpa);
2611 /* Add device to main ll */
2612 ll_dev = get_data_ll_free_entry(&ll_root_free);
2613 if (ll_dev == NULL) {
2614 RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") No free entry found in linked list. Device limit "
2615 "of %d devices per core has been reached\n",
2616 dev->device_fh, num_devices);
2617 if (vdev->regions_hpa)
2618 rte_free(vdev->regions_hpa);
2622 ll_dev->vdev = vdev;
2623 add_data_ll_entry(&ll_root_used, ll_dev);
2625 = dev->device_fh * (num_queues / num_devices);
2628 uint32_t index = vdev->vmdq_rx_q;
2629 uint32_t count_in_ring, i;
2630 struct mbuf_table *tx_q;
2632 count_in_ring = rte_ring_count(vpool_array[index].ring);
2634 LOG_DEBUG(VHOST_CONFIG,
2635 "(%"PRIu64") in new_device: mbuf count in mempool "
2636 "before attach is: %d\n",
2638 rte_mempool_count(vpool_array[index].pool));
2639 LOG_DEBUG(VHOST_CONFIG,
2640 "(%"PRIu64") in new_device: mbuf count in ring "
2641 "before attach is : %d\n",
2642 dev->device_fh, count_in_ring);
2645 * Attach all mbufs in vpool.ring and put them back into vpool.pool.
2647 for (i = 0; i < count_in_ring; i++)
2648 attach_rxmbuf_zcp(dev);
2650 LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") in new_device: mbuf count in "
2651 "mempool after attach is: %d\n",
2653 rte_mempool_count(vpool_array[index].pool));
2654 LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") in new_device: mbuf count in "
2655 "ring after attach is : %d\n",
2657 rte_ring_count(vpool_array[index].ring));
2659 tx_q = &tx_queue_zcp[(uint16_t)vdev->vmdq_rx_q];
2660 tx_q->txq_id = vdev->vmdq_rx_q;
2662 if (rte_eth_dev_tx_queue_start(ports[0], vdev->vmdq_rx_q) != 0) {
2663 struct vpool *vpool = &vpool_array[vdev->vmdq_rx_q];
2665 LOG_DEBUG(VHOST_CONFIG,
2666 "(%"PRIu64") In new_device: Failed to start "
2668 dev->device_fh, vdev->vmdq_rx_q);
2670 mbuf_destroy_zcp(vpool);
2671 rte_free(vdev->regions_hpa);
2676 if (rte_eth_dev_rx_queue_start(ports[0], vdev->vmdq_rx_q) != 0) {
2677 struct vpool *vpool = &vpool_array[vdev->vmdq_rx_q];
2679 LOG_DEBUG(VHOST_CONFIG,
2680 "(%"PRIu64") In new_device: Failed to start "
2682 dev->device_fh, vdev->vmdq_rx_q);
2684 /* Stop the TX queue. */
2685 if (rte_eth_dev_tx_queue_stop(ports[0],
2686 vdev->vmdq_rx_q) != 0) {
2687 LOG_DEBUG(VHOST_CONFIG,
2688 "(%"PRIu64") In new_device: Failed to "
2689 "stop tx queue:%d\n",
2690 dev->device_fh, vdev->vmdq_rx_q);
2693 mbuf_destroy_zcp(vpool);
2694 rte_free(vdev->regions_hpa);
2701 /*reset ready flag*/
2702 vdev->ready = DEVICE_MAC_LEARNING;
2705 /* Find a suitable lcore to add the device. */
2706 RTE_LCORE_FOREACH_SLAVE(lcore) {
2707 if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
2708 device_num_min = lcore_info[lcore].lcore_ll->device_num;
2712 /* Add device to lcore ll */
2713 ll_dev = get_data_ll_free_entry(&lcore_info[core_add].lcore_ll->ll_root_free);
2714 if (ll_dev == NULL) {
2715 RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh);
2716 vdev->ready = DEVICE_SAFE_REMOVE;
2717 destroy_device(dev);
2718 if (vdev->regions_hpa)
2719 rte_free(vdev->regions_hpa);
2723 ll_dev->vdev = vdev;
2724 vdev->coreid = core_add;
2726 add_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used, ll_dev);
2728 /* Initialize device stats */
2729 memset(&dev_statistics[dev->device_fh], 0, sizeof(struct device_statistics));
2731 /* Disable notifications. */
2732 rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
2733 rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);
2734 lcore_info[vdev->coreid].lcore_ll->device_num++;
2735 dev->flags |= VIRTIO_DEV_RUNNING;
2737 RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d\n", dev->device_fh, vdev->coreid);
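/*
 * Editor's illustrative sketch (not part of the original example): the
 * core-selection policy used in new_device() above, extracted into a
 * helper. The device lands on the slave lcore currently serving the
 * fewest devices.
 */
static int
least_loaded_core_sketch(void)
{
	uint32_t device_num_min = num_devices;
	int lcore, core_add = 0;

	RTE_LCORE_FOREACH_SLAVE(lcore) {
		if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
			device_num_min = lcore_info[lcore].lcore_ll->device_num;
			core_add = lcore;
		}
	}

	return core_add;
}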
2743 * These callbacks allow devices to be added to the data core when configuration
2744 * has fully completed.
2746 static const struct virtio_net_device_ops virtio_net_device_ops =
2748 .new_device = new_device,
2749 .destroy_device = destroy_device,
2753 * This is a thread that wakes up after a period to print stats if the user has enabled them.
2759 struct virtio_net_data_ll *dev_ll;
2760 uint64_t tx_dropped, rx_dropped;
2761 uint64_t tx, tx_total, rx, rx_total;
2763 const char clr[] = { 27, '[', '2', 'J', '\0' };
2764 const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
2767 sleep(enable_stats);
2769 /* Clear screen and move to top left */
2770 printf("%s%s", clr, top_left);
2772 printf("\nDevice statistics ====================================");
2774 dev_ll = ll_root_used;
2775 while (dev_ll != NULL) {
2776 device_fh = (uint32_t)dev_ll->vdev->dev->device_fh;
2777 tx_total = dev_statistics[device_fh].tx_total;
2778 tx = dev_statistics[device_fh].tx;
2779 tx_dropped = tx_total - tx;
2780 if (zero_copy == 0) {
2781 rx_total = rte_atomic64_read(
2782 &dev_statistics[device_fh].rx_total_atomic);
2783 rx = rte_atomic64_read(
2784 &dev_statistics[device_fh].rx_atomic);
2786 rx_total = dev_statistics[device_fh].rx_total;
2787 rx = dev_statistics[device_fh].rx;
2789 rx_dropped = rx_total - rx;
2791 printf("\nStatistics for device %"PRIu32" ------------------------------"
2792 "\nTX total: %"PRIu64""
2793 "\nTX dropped: %"PRIu64""
2794 "\nTX successful: %"PRIu64""
2795 "\nRX total: %"PRIu64""
2796 "\nRX dropped: %"PRIu64""
2797 "\nRX successful: %"PRIu64"",
2806 dev_ll = dev_ll->next;
2808 printf("\n======================================================\n");
2813 setup_mempool_tbl(int socket, uint32_t index, char *pool_name,
2814 char *ring_name, uint32_t nb_mbuf)
2816 uint16_t roomsize = VIRTIO_DESCRIPTOR_LEN_ZCP + RTE_PKTMBUF_HEADROOM;
2817 vpool_array[index].pool
2818 = rte_mempool_create(pool_name, nb_mbuf, MBUF_SIZE_ZCP,
2819 MBUF_CACHE_SIZE_ZCP, sizeof(struct rte_pktmbuf_pool_private),
2820 rte_pktmbuf_pool_init, (void *)(uintptr_t)roomsize,
2821 rte_pktmbuf_init, NULL, socket, 0);
2822 if (vpool_array[index].pool != NULL) {
2823 vpool_array[index].ring
2824 = rte_ring_create(ring_name,
2825 rte_align32pow2(nb_mbuf + 1),
2826 socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
2827 if (likely(vpool_array[index].ring != NULL)) {
2828 LOG_DEBUG(VHOST_CONFIG,
2829 "in setup_mempool_tbl: mbuf count in "
2831 rte_mempool_count(vpool_array[index].pool));
2832 LOG_DEBUG(VHOST_CONFIG,
2833 "in setup_mempool_tbl: mbuf count in "
2835 rte_ring_count(vpool_array[index].ring));
2837 rte_exit(EXIT_FAILURE, "ring_create(%s) failed",
2841 /* Need to consider headroom. */
2842 vpool_array[index].buf_size = roomsize - RTE_PKTMBUF_HEADROOM;
2844 rte_exit(EXIT_FAILURE, "mempool_create(%s) failed", pool_name);
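/*
 * Editor's illustrative note (not part of the original example):
 * rte_ring_create() requires a power-of-two slot count and always
 * leaves one slot unused, so holding nb_mbuf mbufs needs at least
 * nb_mbuf + 1 slots, rounded up; hence the rte_align32pow2() call in
 * setup_mempool_tbl() above.
 */
static inline uint32_t
ring_size_for_mbufs_sketch(uint32_t nb_mbuf)
{
	return rte_align32pow2(nb_mbuf + 1);
}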
2850 * Main function, does initialisation and calls the per-lcore functions. The CUSE
2851 * device is also registered here to handle the IOCTLs.
2854 MAIN(int argc, char *argv[])
2856 struct rte_mempool *mbuf_pool = NULL;
2857 unsigned lcore_id, core_id = 0;
2858 unsigned nb_ports, valid_num_ports;
2860 uint8_t portid, queue_id = 0;
2861 static pthread_t tid;
2864 ret = rte_eal_init(argc, argv);
2866 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
2870 /* parse app arguments */
2871 ret = us_vhost_parse_args(argc, argv);
2873 rte_exit(EXIT_FAILURE, "Invalid argument\n");
2875 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++)
2876 if (rte_lcore_is_enabled(lcore_id))
2877 lcore_ids[core_id ++] = lcore_id;
2879 if (rte_lcore_count() > RTE_MAX_LCORE)
2880 rte_exit(EXIT_FAILURE, "Not enough cores\n");
2882 /* Set the number of switching cores available. */
2883 num_switching_cores = rte_lcore_count()-1;
2885 /* Get the number of physical ports. */
2886 nb_ports = rte_eth_dev_count();
2887 if (nb_ports > RTE_MAX_ETHPORTS)
2888 nb_ports = RTE_MAX_ETHPORTS;
2891 * Update the global var NUM_PORTS and the global array PORTS,
2892 * and get the value of var VALID_NUM_PORTS according to the number of system ports.
2894 valid_num_ports = check_ports_num(nb_ports);
2896 if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
2897 RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
2898 "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
2902 if (zero_copy == 0) {
2903 /* Create the mbuf pool. */
2904 mbuf_pool = rte_mempool_create(
2908 MBUF_SIZE, MBUF_CACHE_SIZE,
2909 sizeof(struct rte_pktmbuf_pool_private),
2910 rte_pktmbuf_pool_init, NULL,
2911 rte_pktmbuf_init, NULL,
2912 rte_socket_id(), 0);
2913 if (mbuf_pool == NULL)
2914 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
2916 for (queue_id = 0; queue_id < MAX_QUEUES + 1; queue_id++)
2917 vpool_array[queue_id].pool = mbuf_pool;
2919 if (vm2vm_mode == VM2VM_HARDWARE) {
2920 /* Enable VT loopback so the NIC's L2 switch forwards VM2VM traffic. */
2921 vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
2922 LOG_DEBUG(VHOST_CONFIG,
2923 "Enable loop back for L2 switch in vmdq.\n");
2927 char pool_name[RTE_MEMPOOL_NAMESIZE];
2928 char ring_name[RTE_MEMPOOL_NAMESIZE];
2931 * Zero copy defers queue RX/TX start to the time when the guest
2932 * finishes its startup and packet buffers from that guest are available.
2935 rx_conf_default.rx_deferred_start = (uint8_t)zero_copy;
2936 rx_conf_default.rx_drop_en = 0;
2937 tx_conf_default.tx_deferred_start = (uint8_t)zero_copy;
2938 nb_mbuf = num_rx_descriptor
2939 + num_switching_cores * MBUF_CACHE_SIZE_ZCP
2940 + num_switching_cores * MAX_PKT_BURST;
2942 for (queue_id = 0; queue_id < MAX_QUEUES; queue_id++) {
2943 snprintf(pool_name, sizeof(pool_name),
2944 "rxmbuf_pool_%u", queue_id);
2945 snprintf(ring_name, sizeof(ring_name),
2946 "rxmbuf_ring_%u", queue_id);
2947 setup_mempool_tbl(rte_socket_id(), queue_id,
2948 pool_name, ring_name, nb_mbuf);
2951 nb_mbuf = num_tx_descriptor
2952 + num_switching_cores * MBUF_CACHE_SIZE_ZCP
2953 + num_switching_cores * MAX_PKT_BURST;
2955 for (queue_id = 0; queue_id < MAX_QUEUES; queue_id++) {
2956 snprintf(pool_name, sizeof(pool_name),
2957 "txmbuf_pool_%u", queue_id);
2958 snprintf(ring_name, sizeof(ring_name),
2959 "txmbuf_ring_%u", queue_id);
2960 setup_mempool_tbl(rte_socket_id(),
2961 (queue_id + MAX_QUEUES),
2962 pool_name, ring_name, nb_mbuf);
2965 if (vm2vm_mode == VM2VM_HARDWARE) {
2966 /* Enable VT loopback so the NIC's L2 switch forwards VM2VM traffic. */
2967 vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
2968 LOG_DEBUG(VHOST_CONFIG,
2969 "Enable loop back for L2 switch in vmdq.\n");
2972 /* Set log level. */
2973 rte_set_log_level(LOG_LEVEL);
2975 /* initialize all ports */
2976 for (portid = 0; portid < nb_ports; portid++) {
2977 /* skip ports that are not enabled */
2978 if ((enabled_port_mask & (1 << portid)) == 0) {
2979 RTE_LOG(INFO, VHOST_PORT,
2980 "Skipping disabled port %d\n", portid);
2983 if (port_init(portid) != 0)
2984 rte_exit(EXIT_FAILURE,
2985 "Cannot initialize network ports\n");
2988 /* Initialise all linked lists. */
2989 if (init_data_ll() == -1)
2990 rte_exit(EXIT_FAILURE, "Failed to initialize linked list\n");
2992 /* Initialize device stats */
2993 memset(&dev_statistics, 0, sizeof(dev_statistics));
2995 /* Enable stats if the user option is set. */
2997 pthread_create(&tid, NULL, (void *)print_stats, NULL);
2999 /* Launch all data cores. */
3000 if (zero_copy == 0) {
3001 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
3002 rte_eal_remote_launch(switch_worker,
3003 mbuf_pool, lcore_id);
3006 uint32_t count_in_mempool, index, i;
3007 for (index = 0; index < 2*MAX_QUEUES; index++) {
3008 /* For all RX and TX queues. */
3010 = rte_mempool_count(vpool_array[index].pool);
3013 * Transfer all un-attached mbufs from vpool.pool to vpool.ring.
3016 for (i = 0; i < count_in_mempool; i++) {
3017 struct rte_mbuf *mbuf
3018 = __rte_mbuf_raw_alloc(
3019 vpool_array[index].pool);
3020 rte_ring_sp_enqueue(vpool_array[index].ring,
3024 LOG_DEBUG(VHOST_CONFIG,
3025 "in MAIN: mbuf count in mempool at initial "
3026 "is: %d\n", count_in_mempool);
3027 LOG_DEBUG(VHOST_CONFIG,
3028 "in MAIN: mbuf count in ring at initial is :"
3030 rte_ring_count(vpool_array[index].ring));
3033 RTE_LCORE_FOREACH_SLAVE(lcore_id)
3034 rte_eal_remote_launch(switch_worker_zcp, NULL,
3039 rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
3041 /* Register CUSE device to handle IOCTLs. */
3042 ret = rte_vhost_driver_register((char *)&dev_basename);
3044 rte_exit(EXIT_FAILURE, "CUSE device setup failure.\n");
3046 rte_vhost_driver_callback_register(&virtio_net_device_ops);
3048 /* Start CUSE session. */
3049 rte_vhost_driver_session_start();