4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <arpa/inet.h>
36 #include <linux/if_ether.h>
37 #include <linux/if_vlan.h>
38 #include <linux/virtio_net.h>
39 #include <linux/virtio_ring.h>
42 #include <sys/eventfd.h>
43 #include <sys/param.h>
46 #include <rte_atomic.h>
47 #include <rte_cycles.h>
48 #include <rte_ethdev.h>
50 #include <rte_string_fns.h>
53 #include "virtio-net.h"
54 #include "xen_vhost.h"
56 #define MAX_QUEUES 128
58 /* the maximum number of external ports supported */
59 #define MAX_SUP_PORTS 1
62 * Calculate the number of buffers needed per port
64 #define NUM_MBUFS_PER_PORT ((MAX_QUEUES*RTE_TEST_RX_DESC_DEFAULT) + \
65 (num_switching_cores*MAX_PKT_BURST) + \
66 (num_switching_cores*RTE_TEST_TX_DESC_DEFAULT) +\
67 (num_switching_cores*MBUF_CACHE_SIZE))
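/*
 * The macro is expanded at its point of use, so the references to
 * num_switching_cores, RTE_TEST_RX_DESC_DEFAULT, RTE_TEST_TX_DESC_DEFAULT and
 * MBUF_CACHE_SIZE defined below resolve there: enough mbufs to fill every RX
 * ring, plus the per-core TX rings, bursts in flight and mempool caches.
 */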
69 #define MBUF_CACHE_SIZE 64
72 * RX and TX Prefetch, Host, and Write-back threshold values should be
73 * carefully set for optimal performance. Consult the network
74 * controller's datasheet and supporting DPDK documentation for guidance
75 * on how these parameters should be set.
77 #define RX_PTHRESH 8 /* Default values of RX prefetch threshold reg. */
78 #define RX_HTHRESH 8 /* Default values of RX host threshold reg. */
79 #define RX_WTHRESH 4 /* Default values of RX write-back threshold reg. */
82 * These default values are optimized for use with the Intel(R) 82599 10 GbE
83 * Controller and the DPDK ixgbe PMD. Consider using other values for other
84 * network controllers and/or network drivers.
86 #define TX_PTHRESH 36 /* Default values of TX prefetch threshold reg. */
87 #define TX_HTHRESH 0 /* Default values of TX host threshold reg. */
88 #define TX_WTHRESH 0 /* Default values of TX write-back threshold reg. */
90 #define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
91 #define MAX_MRG_PKT_BURST 16 /* Max burst for mergeable buffers. */
92 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
94 /* State of virtio device. */
95 #define DEVICE_NOT_READY 0
96 #define DEVICE_READY 1
97 #define DEVICE_SAFE_REMOVE 2
99 /* Config_core_flag status definitions. */
100 #define REQUEST_DEV_REMOVAL 1
101 #define ACK_DEV_REMOVAL 0
103 /* Configurable number of RX/TX ring descriptors */
104 #define RTE_TEST_RX_DESC_DEFAULT 128
105 #define RTE_TEST_TX_DESC_DEFAULT 512
107 #define INVALID_PORT_ID 0xFF
109 /* Max number of devices. Limited by vmdq. */
110 #define MAX_DEVICES 64
112 /* Size of buffers used for snprintfs. */
113 #define MAX_PRINT_BUFF 6072
116 /* Maximum long option length for option parsing. */
117 #define MAX_LONG_OPT_SZ 64
119 /* Used to compare MAC addresses. */
120 #define MAC_ADDR_CMP 0xFFFFFFFFFFFF
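/*
 * 0xFFFFFFFFFFFF covers the low 48 bits, i.e. the six bytes of an Ethernet
 * address: ether_addr_cmp() loads both addresses as 64-bit words and uses this
 * mask to ignore the two bytes that follow the address in memory.
 */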
122 /* mask of enabled ports */
123 static uint32_t enabled_port_mask = 0;
125 /*Number of switching cores enabled*/
126 static uint32_t num_switching_cores = 0;
128 /* number of devices/queues to support*/
129 static uint32_t num_queues = 0;
130 uint32_t num_devices = 0;
132 /* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
133 static uint32_t enable_vm2vm = 1;
135 static uint32_t enable_stats = 0;
137 /* Empty VMDQ configuration structure, filled in programmatically. */
138 static const struct rte_eth_conf vmdq_conf_default = {
140 .mq_mode = ETH_MQ_RX_VMDQ_ONLY,
142 .header_split = 0, /**< Header Split disabled */
143 .hw_ip_checksum = 0, /**< IP checksum offload disabled */
144 .hw_vlan_filter = 0, /**< VLAN filtering disabled */
146 * VLAN stripping is necessary for 1G NICs such as the I350;
147 * it fixes a bug where IPv4 forwarding in the guest cannot
148 * forward packets from one virtio device to another.
150 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
151 .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
152 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled */
156 .mq_mode = ETH_MQ_TX_NONE,
160 * should be overridden separately in code with
164 .nb_queue_pools = ETH_8_POOLS,
165 .enable_default_pool = 0,
168 .pool_map = {{0, 0},},
173 static unsigned lcore_ids[RTE_MAX_LCORE];
174 static uint8_t ports[RTE_MAX_ETHPORTS];
175 static unsigned num_ports = 0; /**< The number of ports specified in command line */
177 const uint16_t vlan_tags[] = {
178 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
179 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
180 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
181 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
182 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
183 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
184 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
185 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
188 /* ethernet addresses of ports */
189 static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
191 /* heads for the main used and free linked lists for the data path. */
192 static struct virtio_net_data_ll *ll_root_used = NULL;
193 static struct virtio_net_data_ll *ll_root_free = NULL;
195 /* Array of data core structures containing information on individual core linked lists. */
196 static struct lcore_info lcore_info[RTE_MAX_LCORE];
198 /* Used for queueing bursts of TX packets. */
202 struct rte_mbuf *m_table[MAX_PKT_BURST];
205 /* TX queue for each data core. */
206 struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
208 /* Vlan header struct used to insert vlan tags on TX. */
210 unsigned char h_dest[ETH_ALEN];
211 unsigned char h_source[ETH_ALEN];
214 __be16 h_vlan_encapsulated_proto;
217 /* Header lengths. */
219 #define VLAN_ETH_HLEN 18
221 /* Per-device statistics struct */
222 struct device_statistics {
224 rte_atomic64_t rx_total;
227 } __rte_cache_aligned;
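/*
 * Indexed by device_fh. RX counters are rte_atomic64_t because several data
 * cores may deliver to the same device (VM-to-VM traffic); TX counters are
 * only updated by the core that owns the transmitting device.
 */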
228 struct device_statistics dev_statistics[MAX_DEVICES];
231 * Builds up the correct configuration for the VMDQ VLAN pool map
232 * according to the pool & queue limits.
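 *
 * Pool map entry i pairs vlan_tags[i] with pool bit (1 << i), so frames tagged
 * with vlan_tags[i] are steered by the NIC into VMDQ pool i and end up on the
 * RX queue(s) polled for virtio device i.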
235 get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
237 struct rte_eth_vmdq_rx_conf conf;
240 memset(&conf, 0, sizeof(conf));
241 conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
242 conf.nb_pool_maps = num_devices;
244 for (i = 0; i < conf.nb_pool_maps; i++) {
245 conf.pool_map[i].vlan_id = vlan_tags[ i ];
246 conf.pool_map[i].pools = (1UL << i);
249 (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
250 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
251 sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
256 * Validate the device count against the max pool number obtained from dev_info.
257 * If the device count is invalid, print an error message and return -1.
258 * Each device must have its own pool.
261 validate_num_devices(uint32_t max_nb_devices)
263 if (num_devices > max_nb_devices) {
264 RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
271 * Initialises a given port using global settings and with the rx buffers
272 * coming from the mbuf_pool passed as parameter
275 port_init(uint8_t port, struct rte_mempool *mbuf_pool)
277 struct rte_eth_dev_info dev_info;
278 struct rte_eth_rxconf *rxconf;
279 struct rte_eth_conf port_conf;
280 uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
281 const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT, tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
285 /* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
286 rte_eth_dev_info_get (port, &dev_info);
288 /*configure the number of supported virtio devices based on VMDQ limits */
289 num_devices = dev_info.max_vmdq_pools;
290 num_queues = dev_info.max_rx_queues;
292 retval = validate_num_devices(MAX_DEVICES);
296 /* Get port configuration. */
297 retval = get_eth_conf(&port_conf, num_devices);
301 if (port >= rte_eth_dev_count()) return -1;
303 rx_rings = (uint16_t)num_queues;
304 /* Configure ethernet device. */
305 retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
309 rte_eth_dev_info_get(port, &dev_info);
310 rxconf = &dev_info.default_rxconf;
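/*
 * Enable per-queue descriptor drop so that a VMDQ queue whose guest is slow or
 * not yet ready drops packets when its descriptors run out, instead of backing
 * up the rest of the port.
 */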
311 rxconf->rx_drop_en = 1;
312 /* Setup the queues. */
313 for (q = 0; q < rx_rings; q ++) {
314 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
315 rte_eth_dev_socket_id(port), rxconf,
320 for (q = 0; q < tx_rings; q ++) {
321 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
322 rte_eth_dev_socket_id(port),
328 /* Start the device. */
329 retval = rte_eth_dev_start(port);
333 rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
334 RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
335 RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
336 " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
338 vmdq_ports_eth_addr[port].addr_bytes[0],
339 vmdq_ports_eth_addr[port].addr_bytes[1],
340 vmdq_ports_eth_addr[port].addr_bytes[2],
341 vmdq_ports_eth_addr[port].addr_bytes[3],
342 vmdq_ports_eth_addr[port].addr_bytes[4],
343 vmdq_ports_eth_addr[port].addr_bytes[5]);
349 * Parse the portmask provided at run time.
352 parse_portmask(const char *portmask)
359 /* parse hexadecimal string */
360 pm = strtoul(portmask, &end, 16);
361 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
372 * Parse num options at run time.
375 parse_num_opt(const char *q_arg, uint32_t max_valid_value)
382 /* parse unsigned int string */
383 num = strtoul(q_arg, &end, 10);
384 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
387 if (num > max_valid_value)
398 us_vhost_usage(const char *prgname)
400 RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK --vm2vm [0|1] --stats [0-N] --nb-devices ND\n"
401 " -p PORTMASK: Set mask for ports to be used by application\n"
402 " --vm2vm [0|1]: disable/enable(default) vm2vm comms\n"
403 " --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n",
408 * Parse the arguments given in the command line of the application.
411 us_vhost_parse_args(int argc, char **argv)
416 const char *prgname = argv[0];
417 static struct option long_option[] = {
418 {"vm2vm", required_argument, NULL, 0},
419 {"stats", required_argument, NULL, 0},
423 /* Parse command line */
424 while ((opt = getopt_long(argc, argv, "p:",long_option, &option_index)) != EOF) {
428 enabled_port_mask = parse_portmask(optarg);
429 if (enabled_port_mask == 0) {
430 RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
431 us_vhost_usage(prgname);
437 /* Enable/disable vm2vm comms. */
438 if (!strncmp(long_option[option_index].name, "vm2vm", MAX_LONG_OPT_SZ)) {
439 ret = parse_num_opt(optarg, 1);
441 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for vm2vm [0|1]\n");
442 us_vhost_usage(prgname);
449 /* Enable/disable stats. */
450 if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
451 ret = parse_num_opt(optarg, INT32_MAX);
453 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
454 us_vhost_usage(prgname);
462 /* Invalid option - print options. */
464 us_vhost_usage(prgname);
469 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
470 if (enabled_port_mask & (1 << i))
471 ports[num_ports++] = (uint8_t)i;
474 if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
475 RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
476 "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
484 * Update the global variable num_ports and the array ports according to the number of system ports,
485 * and return the number of valid ports.
487 static unsigned check_ports_num(unsigned nb_ports)
489 unsigned valid_num_ports = num_ports;
492 if (num_ports > nb_ports) {
493 RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
494 num_ports, nb_ports);
495 num_ports = nb_ports;
498 for (portid = 0; portid < num_ports; portid ++) {
499 if (ports[portid] >= nb_ports) {
500 RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
501 ports[portid], (nb_ports - 1));
502 ports[portid] = INVALID_PORT_ID;
506 return valid_num_ports;
510 * Macro to print out packet contents. Wrapped in a debug define so that the
511 * data path is not affected when debug is disabled.
514 #define PRINT_PACKET(device, addr, size, header) do { \
515 char *pkt_addr = (char*)(addr); \
516 unsigned int index; \
517 char packet[MAX_PRINT_BUFF]; \
520 snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Header size %d: ", (device->device_fh), (size)); \
522 snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Packet size %d: ", (device->device_fh), (size)); \
523 for (index = 0; index < (size); index++) { \
524 snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), \
525 "%02hhx ", pkt_addr[index]); \
527 snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), "\n"); \
529 LOG_DEBUG(VHOST_DATA, "%s", packet); \
532 #define PRINT_PACKET(device, addr, size, header) do{} while(0)
536 * Function to convert guest physical addresses to vhost virtual addresses. This
537 * is used to convert virtio buffer addresses.
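 *
 * Each region maps a contiguous guest-physical range into this process's
 * address space, so the lookup is a bounds check followed by adding the
 * region's constant address_offset to the guest physical address.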
539 static inline uint64_t __attribute__((always_inline))
540 gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
542 struct virtio_memory_regions *region;
544 uint64_t vhost_va = 0;
546 for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
547 region = &dev->mem->regions[regionidx];
548 if ((guest_pa >= region->guest_phys_address) &&
549 (guest_pa <= region->guest_phys_address_end)) {
550 vhost_va = region->address_offset + guest_pa;
554 LOG_DEBUG(VHOST_DATA, "(%"PRIu64") GPA %p| VVA %p\n",
555 dev->device_fh, (void*)(uintptr_t)guest_pa, (void*)(uintptr_t)vhost_va);
561 * This function adds buffers to the virtio device's RX virtqueue. Buffers can
562 * be received from the physical port or from another virtio device. A packet
563 * count is returned to indicate the number of packets that were successfully
564 * added to the RX queue.
566 static inline uint32_t __attribute__((always_inline))
567 virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
569 struct vhost_virtqueue *vq;
570 struct vring_desc *desc;
571 struct rte_mbuf *buff;
572 /* The virtio_hdr is initialised to 0. */
573 struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0,0,0,0,0,0},0};
574 uint64_t buff_addr = 0;
575 uint64_t buff_hdr_addr = 0;
576 uint32_t head[MAX_PKT_BURST], packet_len = 0;
577 uint32_t head_idx, packet_success = 0;
578 uint16_t avail_idx, res_cur_idx;
579 uint16_t res_base_idx, res_end_idx;
580 uint16_t free_entries;
583 LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
584 vq = dev->virtqueue_rx;
585 count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
586 /* As multiple data cores may want access to the available buffers, they need to be reserved. */
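/*
 * Reserve [res_base_idx, res_base_idx + count) with a 16-bit compare-and-set
 * on last_used_idx_res; if another core moved the index first, the reservation
 * is retried from the new base.
 */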
589 res_base_idx = vq->last_used_idx_res;
591 avail_idx = *((volatile uint16_t *)&vq->avail->idx);
593 free_entries = (avail_idx - res_base_idx);
595 /*check that we have enough buffers*/
596 if (unlikely(count > free_entries))
597 count = free_entries;
602 res_end_idx = res_base_idx + count;
603 /* vq->last_used_idx_res is atomically updated. */
604 success = rte_atomic16_cmpset(&vq->last_used_idx_res, res_base_idx,
606 } while (unlikely(success == 0));
607 res_cur_idx = res_base_idx;
608 LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);
610 /* Prefetch available ring to retrieve indexes. */
611 rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
613 /* Retrieve all of the head indexes first to avoid caching issues. */
614 for (head_idx = 0; head_idx < count; head_idx++)
615 head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) & (vq->size - 1)];
617 /*Prefetch descriptor index. */
618 rte_prefetch0(&vq->desc[head[packet_success]]);
620 while (res_cur_idx != res_end_idx) {
621 /* Get descriptor from available ring */
622 desc = &vq->desc[head[packet_success]];
623 /* Prefetch descriptor address. */
626 buff = pkts[packet_success];
628 /* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */
629 buff_addr = gpa_to_vva(dev, desc->addr);
630 /* Prefetch buffer address. */
631 rte_prefetch0((void*)(uintptr_t)buff_addr);
634 /* Copy virtio_hdr to packet and increment buffer address */
635 buff_hdr_addr = buff_addr;
636 packet_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen;
639 * If the descriptors are chained the header and data are placed in
642 if (desc->flags & VRING_DESC_F_NEXT) {
643 desc->len = vq->vhost_hlen;
644 desc = &vq->desc[desc->next];
645 /* Buffer address translation. */
646 buff_addr = gpa_to_vva(dev, desc->addr);
647 desc->len = rte_pktmbuf_data_len(buff);
649 buff_addr += vq->vhost_hlen;
650 desc->len = packet_len;
654 /* Update used ring with desc information */
655 vq->used->ring[res_cur_idx & (vq->size - 1)].id = head[packet_success];
656 vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;
658 /* Copy mbuf data to buffer */
659 rte_memcpy((void *)(uintptr_t)buff_addr, (const void*)buff->data, rte_pktmbuf_data_len(buff));
664 /* Mergeable RX buffers are disabled, so a header is required per buffer. */
665 rte_memcpy((void *)(uintptr_t)buff_hdr_addr, (const void*)&virtio_hdr, vq->vhost_hlen);
666 if (res_cur_idx < res_end_idx) {
667 /* Prefetch descriptor index. */
668 rte_prefetch0(&vq->desc[head[packet_success]]);
672 rte_compiler_barrier();
674 /* Wait until it's our turn to add our buffer to the used ring. */
675 while (unlikely(vq->last_used_idx != res_base_idx))
678 *(volatile uint16_t *)&vq->used->idx += count;
680 vq->last_used_idx = res_end_idx;
686 * Compares a packet destination MAC address to a device MAC address.
688 static inline int __attribute__((always_inline))
689 ether_addr_cmp(struct ether_addr *ea, struct ether_addr *eb)
691 return (((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0);
695 * This function registers a MAC address along with a
696 * VLAN tag with a VMDQ pool.
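 *
 * The MAC must be unique among registered devices because packets are switched
 * on the destination MAC; the VLAN tag taken from vlan_tags[device_fh] is the
 * VMDQ steering key that selects the device's RX queue.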
699 link_vmdq(struct virtio_net *dev)
702 struct virtio_net_data_ll *dev_ll;
704 dev_ll = ll_root_used;
706 while (dev_ll != NULL) {
707 if ((dev != dev_ll->dev) && ether_addr_cmp(&dev->mac_address, &dev_ll->dev->mac_address)) {
708 RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") WARNING: This device is using an existing MAC address and has not been registered.\n", dev->device_fh);
711 dev_ll = dev_ll->next;
714 /* vlan_tag currently uses the device_id. */
715 dev->vlan_tag = vlan_tags[dev->device_fh];
716 dev->vmdq_rx_q = dev->device_fh * (num_queues/num_devices);
718 /* Print out VMDQ registration info. */
719 RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\n",
721 dev->mac_address.addr_bytes[0], dev->mac_address.addr_bytes[1],
722 dev->mac_address.addr_bytes[2], dev->mac_address.addr_bytes[3],
723 dev->mac_address.addr_bytes[4], dev->mac_address.addr_bytes[5],
726 /* Register the MAC address. */
727 ret = rte_eth_dev_mac_addr_add(ports[0], &dev->mac_address, (uint32_t)dev->device_fh);
729 RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",
734 /* Enable stripping of the vlan tag as we handle routing. */
735 rte_eth_dev_set_vlan_strip_on_queue(ports[0], dev->vmdq_rx_q, 1);
737 rte_compiler_barrier();
738 /* Set device as ready for RX. */
739 dev->ready = DEVICE_READY;
745 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
746 * queue before disabling RX on the device.
749 unlink_vmdq(struct virtio_net *dev)
753 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
755 if (dev->ready == DEVICE_READY) {
756 /*clear MAC and VLAN settings*/
757 rte_eth_dev_mac_addr_remove(ports[0], &dev->mac_address);
758 for (i = 0; i < 6; i++)
759 dev->mac_address.addr_bytes[i] = 0;
763 /*Clear out the receive buffers*/
764 rx_count = rte_eth_rx_burst(ports[0],
765 (uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
768 for (i = 0; i < rx_count; i++)
769 rte_pktmbuf_free(pkts_burst[i]);
771 rx_count = rte_eth_rx_burst(ports[0],
772 (uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
775 dev->ready = DEVICE_NOT_READY;
780 * Check if the packet destination MAC address is for a local device. If so then put
781 * the packet on that device's RX queue. If not then return.
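 *
 * Judging from the call site in virtio_tx_route(), a return value of 0 means
 * the packet was consumed locally (delivered to the matching device or
 * dropped), so the caller only sends to the physical port on a non-zero return.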
783 static inline unsigned __attribute__((always_inline))
784 virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
786 struct virtio_net_data_ll *dev_ll;
787 struct ether_hdr *pkt_hdr;
790 pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
792 /*get the used devices list*/
793 dev_ll = ll_root_used;
795 while (dev_ll != NULL) {
796 if (likely(dev_ll->dev->ready == DEVICE_READY) && ether_addr_cmp(&(pkt_hdr->d_addr),
797 &dev_ll->dev->mac_address)) {
799 /* Drop the packet if it is destined for the transmitting device itself. */
800 if (dev_ll->dev->device_fh == dev->device_fh) {
801 LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n",
802 dev_ll->dev->device_fh);
807 LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is local\n", dev_ll->dev->device_fh);
809 if (dev_ll->dev->remove) {
810 /*drop the packet if the device is marked for removal*/
811 LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Device is marked for removal\n", dev_ll->dev->device_fh);
813 /*send the packet to the local virtio device*/
814 ret = virtio_dev_rx(dev_ll->dev, &m, 1);
816 rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx_total, 1);
817 rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx, ret);
818 dev_statistics[dev->device_fh].tx_total++;
819 dev_statistics[dev->device_fh].tx += ret;
825 dev_ll = dev_ll->next;
832 * This function routes the TX packet to the correct interface. This may be a local device
833 * or the physical port.
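 *
 * For the physical port a fresh mbuf is allocated and a 4-byte 802.1Q tag is
 * inserted between the Ethernet header and the payload, which is why the copy
 * below is split at ETH_HLEN and the new frame grows by VLAN_HLEN bytes.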
835 static inline void __attribute__((always_inline))
836 virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *mbuf_pool, uint16_t vlan_tag)
838 struct mbuf_table *tx_q;
839 struct vlan_ethhdr *vlan_hdr;
840 struct rte_mbuf **m_table;
841 struct rte_mbuf *mbuf;
843 const uint16_t lcore_id = rte_lcore_id();
845 /*check if destination is local VM*/
846 if (enable_vm2vm && (virtio_tx_local(dev, m) == 0)) {
850 LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh);
852 /*Add packet to the port tx queue*/
853 tx_q = &lcore_tx_queue[lcore_id];
856 /* Allocate an mbuf and populate the structure. */
857 mbuf = rte_pktmbuf_alloc(mbuf_pool);
861 mbuf->data_len = m->data_len + VLAN_HLEN;
862 mbuf->pkt_len = mbuf->data_len;
864 /* Copy ethernet header to mbuf. */
865 rte_memcpy(rte_pktmbuf_mtod(mbuf, void*),
866 rte_pktmbuf_mtod(m, const void*), ETH_HLEN);
869 /* Set up the VLAN header. Multi-byte fields are converted to network byte order with htons(). */
870 vlan_hdr = rte_pktmbuf_mtod(mbuf, struct vlan_ethhdr *);
871 vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;
872 vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
873 vlan_hdr->h_vlan_TCI = htons(vlan_tag);
875 /* Copy the remaining packet contents to the mbuf. */
876 rte_memcpy(rte_pktmbuf_mtod_offset(mbuf, void *, VLAN_ETH_HLEN),
877 rte_pktmbuf_mtod_offset(m, const void *, ETH_HLEN),
878 (m->data_len - ETH_HLEN));
879 tx_q->m_table[len] = mbuf;
882 dev_statistics[dev->device_fh].tx_total++;
883 dev_statistics[dev->device_fh].tx++;
886 if (unlikely(len == MAX_PKT_BURST)) {
887 m_table = (struct rte_mbuf **)tx_q->m_table;
888 ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id, m_table, (uint16_t) len);
889 /* Free any buffers not handled by TX and update the port stats. */
890 if (unlikely(ret < len)) {
892 rte_pktmbuf_free(m_table[ret]);
893 } while (++ret < len);
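/*
 * Dequeues the buffers the guest has placed on its TX virtqueue, wraps each
 * one in a local dummy mbuf and hands it to virtio_tx_route(). The used ring
 * entries are completed with len = 0 since the host only reads these buffers.
 */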
903 static inline void __attribute__((always_inline))
904 virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
907 struct vhost_virtqueue *vq;
908 struct vring_desc *desc;
909 uint64_t buff_addr = 0;
910 uint32_t head[MAX_PKT_BURST];
913 uint16_t free_entries, packet_success = 0;
916 vq = dev->virtqueue_tx;
917 avail_idx = *((volatile uint16_t *)&vq->avail->idx);
919 /* If there are no available buffers then return. */
920 if (vq->last_used_idx == avail_idx)
923 LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);
925 /* Prefetch available ring to retrieve head indexes. */
926 rte_prefetch0(&vq->avail->ring[vq->last_used_idx & (vq->size - 1)]);
928 /*get the number of free entries in the ring*/
929 free_entries = avail_idx - vq->last_used_idx;
930 free_entries = unlikely(free_entries < MAX_PKT_BURST) ? free_entries : MAX_PKT_BURST;
932 LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n", dev->device_fh, free_entries);
933 /* Retrieve all of the head indexes first to avoid caching issues. */
934 for (i = 0; i < free_entries; i++)
935 head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];
937 /* Prefetch descriptor index. */
938 rte_prefetch0(&vq->desc[head[packet_success]]);
940 while (packet_success < free_entries) {
941 desc = &vq->desc[head[packet_success]];
942 /* Prefetch descriptor address. */
945 if (packet_success < (free_entries - 1)) {
946 /* Prefetch descriptor index. */
947 rte_prefetch0(&vq->desc[head[packet_success+1]]);
950 /* Update used index buffer information. */
951 used_idx = vq->last_used_idx & (vq->size - 1);
952 vq->used->ring[used_idx].id = head[packet_success];
953 vq->used->ring[used_idx].len = 0;
955 /* Discard first buffer as it is the virtio header */
956 desc = &vq->desc[desc->next];
958 /* Buffer address translation. */
959 buff_addr = gpa_to_vva(dev, desc->addr);
960 /* Prefetch buffer address. */
961 rte_prefetch0((void*)(uintptr_t)buff_addr);
963 /* Setup dummy mbuf. This is copied to a real mbuf if transmitted out the physical port. */
964 m.data_len = desc->len;
968 virtio_tx_route(dev, &m, mbuf_pool, 0);
974 rte_compiler_barrier();
975 vq->used->idx += packet_success;
976 /* Kick guest if required. */
980 * This function is called by each data core. It handles all RX/TX registered with the
981 * core. For TX the specific lcore linked list is used. For RX, MAC addresses are compared
982 * with all devices in the main linked list.
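 *
 * Outgoing packets are buffered per core in lcore_tx_queue and flushed either
 * when a full burst of MAX_PKT_BURST packets has accumulated or after roughly
 * BURST_TX_DRAIN_US microseconds, whichever comes first.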
985 switch_worker(__attribute__((unused)) void *arg)
987 struct rte_mempool *mbuf_pool = arg;
988 struct virtio_net *dev = NULL;
989 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
990 struct virtio_net_data_ll *dev_ll;
991 struct mbuf_table *tx_q;
992 volatile struct lcore_ll_info *lcore_ll;
993 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
994 uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
996 const uint16_t lcore_id = rte_lcore_id();
997 const uint16_t num_cores = (uint16_t)rte_lcore_count();
998 uint16_t rx_count = 0;
1000 RTE_LOG(INFO, VHOST_DATA, "Processing on core %u started\n", lcore_id);
1001 lcore_ll = lcore_info[lcore_id].lcore_ll;
1004 tx_q = &lcore_tx_queue[lcore_id];
1005 for (i = 0; i < num_cores; i ++) {
1006 if (lcore_ids[i] == lcore_id) {
1013 cur_tsc = rte_rdtsc();
1015 * TX burst queue drain
1017 diff_tsc = cur_tsc - prev_tsc;
1018 if (unlikely(diff_tsc > drain_tsc)) {
1021 LOG_DEBUG(VHOST_DATA, "TX queue drained after timeout with burst size %u \n", tx_q->len);
1023 /*Tx any packets in the queue*/
1024 ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,
1025 (struct rte_mbuf **)tx_q->m_table,
1026 (uint16_t)tx_q->len);
1027 if (unlikely(ret < tx_q->len)) {
1029 rte_pktmbuf_free(tx_q->m_table[ret]);
1030 } while (++ret < tx_q->len);
1041 * Inform the configuration core that we have exited the linked list and that no devices are
1042 * in use if requested.
1044 if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
1045 lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
1050 dev_ll = lcore_ll->ll_root_used;
1052 while (dev_ll != NULL) {
1053 /*get virtio device ID*/
1056 if (unlikely(dev->remove)) {
1057 dev_ll = dev_ll->next;
1059 dev->ready = DEVICE_SAFE_REMOVE;
1062 if (likely(dev->ready == DEVICE_READY)) {
1064 rx_count = rte_eth_rx_burst(ports[0],
1065 (uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
1068 ret_count = virtio_dev_rx(dev, pkts_burst, rx_count);
1070 rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx_total, rx_count);
1071 rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx, ret_count);
1073 while (likely(rx_count)) {
1075 rte_pktmbuf_free_seg(pkts_burst[rx_count]);
1081 if (likely(!dev->remove))
1083 virtio_dev_tx(dev, mbuf_pool);
1085 /*move to the next device in the list*/
1086 dev_ll = dev_ll->next;
1094 * Add an entry to a used linked list. A free entry must first be found in the free linked list
1095 * using get_data_ll_free_entry();
1098 add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev)
1100 struct virtio_net_data_ll *ll = *ll_root_addr;
1102 /* Set next as NULL and use a compiler barrier to avoid reordering. */
1103 ll_dev->next = NULL;
1104 rte_compiler_barrier();
1106 /* If ll == NULL then this is the first device. */
1108 /* Increment to the tail of the linked list. */
1109 while ((ll->next != NULL) )
1114 *ll_root_addr = ll_dev;
1119 * Remove an entry from a used linked list. The entry must then be added to the free linked list
1120 * using put_data_ll_free_entry().
1123 rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev, struct virtio_net_data_ll *ll_dev_last)
1125 struct virtio_net_data_ll *ll = *ll_root_addr;
1128 *ll_root_addr = ll_dev->next;
1130 ll_dev_last->next = ll_dev->next;
1134 * Find and return an entry from the free linked list.
1136 static struct virtio_net_data_ll *
1137 get_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr)
1139 struct virtio_net_data_ll *ll_free = *ll_root_addr;
1140 struct virtio_net_data_ll *ll_dev;
1142 if (ll_free == NULL)
1146 *ll_root_addr = ll_free->next;
1152 * Place an entry back on to the free linked list.
1155 put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev)
1157 struct virtio_net_data_ll *ll_free = *ll_root_addr;
1159 ll_dev->next = ll_free;
1160 *ll_root_addr = ll_dev;
1164 * Creates a linked list of a given size.
1166 static struct virtio_net_data_ll *
1167 alloc_data_ll(uint32_t size)
1169 struct virtio_net_data_ll *ll_new;
1172 /* Malloc and then chain the linked list. */
1173 ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
1174 if (ll_new == NULL) {
1175 RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for ll_new.\n");
1179 for (i = 0; i < size - 1; i++) {
1180 ll_new[i].dev = NULL;
1181 ll_new[i].next = &ll_new[i+1];
1183 ll_new[i].next = NULL;
1189 * Create the main linked list along with each individual core's linked list. A used and a free list
1190 * are created to manage entries.
1197 RTE_LCORE_FOREACH_SLAVE(lcore) {
1198 lcore_info[lcore].lcore_ll = malloc(sizeof(struct lcore_ll_info));
1199 if (lcore_info[lcore].lcore_ll == NULL) {
1200 RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for lcore_ll.\n");
1204 lcore_info[lcore].lcore_ll->device_num = 0;
1205 lcore_info[lcore].lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
1206 lcore_info[lcore].lcore_ll->ll_root_used = NULL;
1207 if (num_devices % num_switching_cores)
1208 lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll((num_devices / num_switching_cores) + 1);
1210 lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll(num_devices / num_switching_cores);
1213 /* Allocate devices up to a maximum of MAX_DEVICES. */
1214 ll_root_free = alloc_data_ll(MIN((num_devices), MAX_DEVICES));
1219 * Remove a device from the specific data core's linked list and from the main linked list. The
1220 * RX/TX thread must set the flag to indicate that it is safe to remove the device.
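 *
 * The call spins until the data core marks the device DEVICE_SAFE_REMOVE and
 * then until every core acknowledges dev_removal_flag, so no core can still
 * hold a reference when the entries are returned to the free lists.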
1224 destroy_device (volatile struct virtio_net *dev)
1226 struct virtio_net_data_ll *ll_lcore_dev_cur;
1227 struct virtio_net_data_ll *ll_main_dev_cur;
1228 struct virtio_net_data_ll *ll_lcore_dev_last = NULL;
1229 struct virtio_net_data_ll *ll_main_dev_last = NULL;
1232 dev->flags &= ~VIRTIO_DEV_RUNNING;
1234 /*set the remove flag. */
1237 while(dev->ready != DEVICE_SAFE_REMOVE) {
1241 /* Search for entry to be removed from lcore ll */
1242 ll_lcore_dev_cur = lcore_info[dev->coreid].lcore_ll->ll_root_used;
1243 while (ll_lcore_dev_cur != NULL) {
1244 if (ll_lcore_dev_cur->dev == dev) {
1247 ll_lcore_dev_last = ll_lcore_dev_cur;
1248 ll_lcore_dev_cur = ll_lcore_dev_cur->next;
1252 /* Search for entry to be removed from main ll */
1253 ll_main_dev_cur = ll_root_used;
1254 ll_main_dev_last = NULL;
1255 while (ll_main_dev_cur != NULL) {
1256 if (ll_main_dev_cur->dev == dev) {
1259 ll_main_dev_last = ll_main_dev_cur;
1260 ll_main_dev_cur = ll_main_dev_cur->next;
1264 if (ll_lcore_dev_cur == NULL || ll_main_dev_cur == NULL) {
1265 RTE_LOG(ERR, XENHOST, "%s: could not find device in per_cpu list or main_list\n", __func__);
1269 /* Remove entries from the lcore and main ll. */
1270 rm_data_ll_entry(&lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->ll_root_used, ll_lcore_dev_cur, ll_lcore_dev_last);
1271 rm_data_ll_entry(&ll_root_used, ll_main_dev_cur, ll_main_dev_last);
1273 /* Set the dev_removal_flag on each lcore. */
1274 RTE_LCORE_FOREACH_SLAVE(lcore) {
1275 lcore_info[lcore].lcore_ll->dev_removal_flag = REQUEST_DEV_REMOVAL;
1279 * Once each core has set its dev_removal_flag to ACK_DEV_REMOVAL we can be sure that
1280 * it can no longer access the device removed from the linked lists and that the device
1281 * is no longer in use.
1283 RTE_LCORE_FOREACH_SLAVE(lcore) {
1284 while (lcore_info[lcore].lcore_ll->dev_removal_flag != ACK_DEV_REMOVAL) {
1289 /* Add the entries back to the lcore and main free ll.*/
1290 put_data_ll_free_entry(&lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->ll_root_free, ll_lcore_dev_cur);
1291 put_data_ll_free_entry(&ll_root_free, ll_main_dev_cur);
1293 /* Decrement the number of devices on the lcore. */
1294 lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->device_num--;
1296 RTE_LOG(INFO, VHOST_DATA, " #####(%"PRIu64") Device has been removed from data core\n", dev->device_fh);
1300 * A new device is added to a data core. First the device is added to the main linked list
1301 * and then allocated to a specific data core.
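 *
 * The device is assigned to the slave lcore that currently handles the fewest
 * devices (a simple least-loaded placement).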
1304 new_device (struct virtio_net *dev)
1306 struct virtio_net_data_ll *ll_dev;
1307 int lcore, core_add = 0;
1308 uint32_t device_num_min = num_devices;
1310 /* Add device to main ll */
1311 ll_dev = get_data_ll_free_entry(&ll_root_free);
1312 if (ll_dev == NULL) {
1313 RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") No free entry found in linked list. Device limit "
1314 "of %d devices has been reached\n",
1315 dev->device_fh, num_devices);
1319 add_data_ll_entry(&ll_root_used, ll_dev);
1321 /*reset ready flag*/
1322 dev->ready = DEVICE_NOT_READY;
1325 /* Find a suitable lcore to add the device. */
1326 RTE_LCORE_FOREACH_SLAVE(lcore) {
1327 if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
1328 device_num_min = lcore_info[lcore].lcore_ll->device_num;
1332 /* Add device to lcore ll */
1333 ll_dev->dev->coreid = core_add;
1334 ll_dev = get_data_ll_free_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_free);
1335 if (ll_dev == NULL) {
1336 RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh);
1337 destroy_device(dev);
1341 add_data_ll_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_used, ll_dev);
1343 /* Initialize device stats */
1344 memset(&dev_statistics[dev->device_fh], 0, sizeof(struct device_statistics));
1346 lcore_info[ll_dev->dev->coreid].lcore_ll->device_num++;
1347 dev->flags |= VIRTIO_DEV_RUNNING;
1349 RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d\n", dev->device_fh, dev->coreid);
1357 * These callbacks allow devices to be added to a data core when configuration
1358 * is fully complete.
1360 static const struct virtio_net_device_ops virtio_net_device_ops =
1362 .new_device = new_device,
1363 .destroy_device = destroy_device,
1367 * This thread wakes up periodically to print statistics if the user has enabled them.
1373 struct virtio_net_data_ll *dev_ll;
1374 uint64_t tx_dropped, rx_dropped;
1375 uint64_t tx, tx_total, rx, rx_total;
1377 const char clr[] = { 27, '[', '2', 'J', '\0' };
1378 const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
1381 sleep(enable_stats);
1383 /* Clear screen and move to top left */
1384 printf("%s%s", clr, top_left);
1386 printf("\nDevice statistics ====================================");
1388 dev_ll = ll_root_used;
1389 while (dev_ll != NULL) {
1390 device_fh = (uint32_t)dev_ll->dev->device_fh;
1391 tx_total = dev_statistics[device_fh].tx_total;
1392 tx = dev_statistics[device_fh].tx;
1393 tx_dropped = tx_total - tx;
1394 rx_total = rte_atomic64_read(&dev_statistics[device_fh].rx_total);
1395 rx = rte_atomic64_read(&dev_statistics[device_fh].rx);
1396 rx_dropped = rx_total - rx;
1398 printf("\nStatistics for device %"PRIu32" ------------------------------"
1399 "\nTX total: %"PRIu64""
1400 "\nTX dropped: %"PRIu64""
1401 "\nTX successful: %"PRIu64""
1402 "\nRX total: %"PRIu64""
1403 "\nRX dropped: %"PRIu64""
1404 "\nRX successful: %"PRIu64"",
1413 dev_ll = dev_ll->next;
1415 printf("\n======================================================\n");
1420 int init_virtio_net(struct virtio_net_device_ops const * const ops);
1423 * Main function, does initialisation and calls the per-lcore functions. The
1424 * Xen virtio monitor loop is also entered here on the master lcore.
1427 main(int argc, char *argv[])
1429 struct rte_mempool *mbuf_pool;
1430 unsigned lcore_id, core_id = 0;
1431 unsigned nb_ports, valid_num_ports;
1434 static pthread_t tid;
1437 ret = rte_eal_init(argc, argv);
1439 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
1443 /* parse app arguments */
1444 ret = us_vhost_parse_args(argc, argv);
1446 rte_exit(EXIT_FAILURE, "Invalid argument\n");
1448 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++)
1449 if (rte_lcore_is_enabled(lcore_id))
1450 lcore_ids[core_id ++] = lcore_id;
1452 if (rte_lcore_count() > RTE_MAX_LCORE)
1453 rte_exit(EXIT_FAILURE,"Not enough cores\n");
1455 /* Set the number of switching cores available. */
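/*
 * One lcore (the master) is left to run init_virtio_xen() and
 * virtio_monitor_loop() below; only the slave lcores run switch_worker().
 */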
1456 num_switching_cores = rte_lcore_count()-1;
1458 /* Get the number of physical ports. */
1459 nb_ports = rte_eth_dev_count();
1460 if (nb_ports > RTE_MAX_ETHPORTS)
1461 nb_ports = RTE_MAX_ETHPORTS;
1464 * Update the global variable num_ports and the global array ports,
1465 * and get the value of valid_num_ports according to the number of system ports.
1467 valid_num_ports = check_ports_num(nb_ports);
1469 if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
1470 RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
1471 "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
1475 /* Create the mbuf pool. */
1476 mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
1477 NUM_MBUFS_PER_PORT * valid_num_ports, MBUF_CACHE_SIZE, 0,
1478 RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
1479 if (mbuf_pool == NULL)
1480 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
1482 /* Set log level. */
1483 rte_set_log_level(LOG_LEVEL);
1485 /* initialize all ports */
1486 for (portid = 0; portid < nb_ports; portid++) {
1487 /* skip ports that are not enabled */
1488 if ((enabled_port_mask & (1 << portid)) == 0) {
1489 RTE_LOG(INFO, VHOST_PORT, "Skipping disabled port %d\n", portid);
1492 if (port_init(portid, mbuf_pool) != 0)
1493 rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
1496 /* Initialise all linked lists. */
1497 if (init_data_ll() == -1)
1498 rte_exit(EXIT_FAILURE, "Failed to initialize linked list\n");
1500 /* Initialize device stats */
1501 memset(&dev_statistics, 0, sizeof(dev_statistics));
1503 /* Enable stats if the user option is set. */
1505 pthread_create(&tid, NULL, (void*)print_stats, NULL );
1507 /* Launch all data cores. */
1508 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
1509 rte_eal_remote_launch(switch_worker, mbuf_pool, lcore_id);
1512 init_virtio_xen(&virtio_net_device_ops);
1514 virtio_monitor_loop();