4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <arpa/inet.h>
36 #include <linux/if_ether.h>
37 #include <linux/if_vlan.h>
38 #include <linux/virtio_net.h>
39 #include <linux/virtio_ring.h>
42 #include <sys/eventfd.h>
43 #include <sys/param.h>
46 #include <rte_atomic.h>
47 #include <rte_cycles.h>
48 #include <rte_ethdev.h>
50 #include <rte_string_fns.h>
53 #include "virtio-net.h"
54 #include "xen_vhost.h"
/* Application-wide tunables and runtime state for the vhost-xen switching app.
 * NOTE(review): the original file's line numbers are fused into this extraction
 * and several source lines are missing between the visible ones. */
56 #define MAX_QUEUES 128
58 /* the maximum number of external ports supported */
59 #define MAX_SUP_PORTS 1
62 * Calculate the number of buffers needed per port
/* NOTE: this macro references the runtime variable num_switching_cores and the
 * macro MBUF_CACHE_SIZE defined below; both are resolved at the point of use,
 * so the ordering here is legal C preprocessing. */
64 #define NUM_MBUFS_PER_PORT ((MAX_QUEUES*RTE_TEST_RX_DESC_DEFAULT) + \
65 (num_switching_cores*MAX_PKT_BURST) + \
66 (num_switching_cores*RTE_TEST_TX_DESC_DEFAULT) +\
67 (num_switching_cores*MBUF_CACHE_SIZE))
69 #define MBUF_CACHE_SIZE 64
/* Per-mbuf buffer size: 2 KiB payload area plus mbuf struct and headroom. */
70 #define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
73 * RX and TX Prefetch, Host, and Write-back threshold values should be
74 * carefully set for optimal performance. Consult the network
75 * controller's datasheet and supporting DPDK documentation for guidance
76 * on how these parameters should be set.
78 #define RX_PTHRESH 8 /* Default values of RX prefetch threshold reg. */
79 #define RX_HTHRESH 8 /* Default values of RX host threshold reg. */
80 #define RX_WTHRESH 4 /* Default values of RX write-back threshold reg. */
83 * These default values are optimized for use with the Intel(R) 82599 10 GbE
84 * Controller and the DPDK ixgbe PMD. Consider using other values for other
85 * network controllers and/or network drivers.
87 #define TX_PTHRESH 36 /* Default values of TX prefetch threshold reg. */
88 #define TX_HTHRESH 0 /* Default values of TX host threshold reg. */
89 #define TX_WTHRESH 0 /* Default values of TX write-back threshold reg. */
91 #define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
/* NOTE(review): comment says "Set to 1" but the value is 16 — one of the two
 * is stale; confirm against the upstream example before relying on it. */
92 #define MAX_MRG_PKT_BURST 16 /* Max burst for merge buffers. Set to 1 due to performance issue. */
93 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
95 /* State of virtio device. */
96 #define DEVICE_NOT_READY 0
97 #define DEVICE_READY 1
98 #define DEVICE_SAFE_REMOVE 2
100 /* Config_core_flag status definitions. */
101 #define REQUEST_DEV_REMOVAL 1
102 #define ACK_DEV_REMOVAL 0
104 /* Configurable number of RX/TX ring descriptors */
105 #define RTE_TEST_RX_DESC_DEFAULT 128
106 #define RTE_TEST_TX_DESC_DEFAULT 512
108 #define INVALID_PORT_ID 0xFF
110 /* Max number of devices. Limited by vmdq. */
111 #define MAX_DEVICES 64
113 /* Size of buffers used for snprintfs. */
114 #define MAX_PRINT_BUFF 6072
117 /* Maximum long option length for option parsing. */
118 #define MAX_LONG_OPT_SZ 64
/* Mask covering the low 48 bits (the 6 MAC-address octets) of a uint64_t. */
120 /* Used to compare MAC addresses. */
121 #define MAC_ADDR_CMP 0xFFFFFFFFFFFF
123 /* mask of enabled ports */
124 static uint32_t enabled_port_mask = 0;
126 /*Number of switching cores enabled*/
127 static uint32_t num_switching_cores = 0;
/* num_queues/num_devices are overwritten in port_init() from dev_info limits. */
129 /* number of devices/queues to support*/
130 static uint32_t num_queues = 0;
131 uint32_t num_devices = 0;
133 /* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
134 static uint32_t enable_vm2vm = 1;
/* 0 disables the stats thread; N > 0 is the print interval in seconds. */
136 static uint32_t enable_stats = 0;
/* Default NIC queue/VMDQ configuration and file-scope data-path state.
 * NOTE(review): several struct-nesting lines (opening/closing braces and some
 * member designators) are missing from this extraction; the visible
 * initializers belong to the threshold/rxmode/txmode sub-structs. */
138 /* Default configuration for rx and tx thresholds etc. */
139 static const struct rte_eth_rxconf rx_conf_default = {
141 .pthresh = RX_PTHRESH,
142 .hthresh = RX_HTHRESH,
143 .wthresh = RX_WTHRESH,
149 * These default values are optimized for use with the Intel(R) 82599 10 GbE
150 * Controller and the DPDK ixgbe/igb PMD. Consider using other values for other
151 * network controllers and/or network drivers.
153 static const struct rte_eth_txconf tx_conf_default = {
155 .pthresh = TX_PTHRESH,
156 .hthresh = TX_HTHRESH,
157 .wthresh = TX_WTHRESH,
159 .tx_free_thresh = 0, /* Use PMD default values */
160 .tx_rs_thresh = 0, /* Use PMD default values */
163 /* empty vmdq configuration structure. Filled in programatically */
164 static const struct rte_eth_conf vmdq_conf_default = {
166 .mq_mode = ETH_MQ_RX_VMDQ_ONLY,
168 .header_split = 0, /**< Header Split disabled */
169 .hw_ip_checksum = 0, /**< IP checksum offload disabled */
170 .hw_vlan_filter = 0, /**< VLAN filtering disabled */
172 * It is necessary for 1G NIC such as I350,
173 * this fixes bug of ipv4 forwarding in guest can't
174 * forward pakets from one virtio dev to another virtio dev.
176 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
177 .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
178 .hw_strip_crc = 0, /**< CRC stripped by hardware */
182 .mq_mode = ETH_MQ_TX_NONE,
186 * should be overridden separately in code with
/* Placeholder pool count; get_eth_conf() overwrites the VMDQ RX conf at runtime. */
190 .nb_queue_pools = ETH_8_POOLS,
191 .enable_default_pool = 0,
194 .pool_map = {{0, 0},},
199 static unsigned lcore_ids[RTE_MAX_LCORE];
200 static uint8_t ports[RTE_MAX_ETHPORTS];
201 static unsigned num_ports = 0; /**< The number of ports specified in command line */
/* One VLAN tag per possible VMDQ pool; indexed by device_fh in link_vmdq(). */
203 const uint16_t vlan_tags[] = {
204 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
205 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
206 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
207 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
208 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
209 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
210 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
211 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
214 /* ethernet addresses of ports */
215 static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
217 /* heads for the main used and free linked lists for the data path. */
218 static struct virtio_net_data_ll *ll_root_used = NULL;
219 static struct virtio_net_data_ll *ll_root_free = NULL;
221 /* Array of data core structures containing information on individual core linked lists. */
222 static struct lcore_info lcore_info[RTE_MAX_LCORE];
/* NOTE(review): the 'struct mbuf_table {' opening line and its len/txq_id
 * members are missing from this extraction; m_table below is its burst array. */
224 /* Used for queueing bursts of TX packets. */
228 struct rte_mbuf *m_table[MAX_PKT_BURST];
231 /* TX queue for each data core. */
232 struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
/* NOTE(review): the 'struct vlan_ethhdr {' opening line and the h_vlan_proto /
 * h_vlan_TCI members are likewise missing from this view. */
234 /* Vlan header struct used to insert vlan tags on TX. */
236 unsigned char h_dest[ETH_ALEN];
237 unsigned char h_source[ETH_ALEN];
240 __be16 h_vlan_encapsulated_proto;
243 /* Header lengths. */
245 #define VLAN_ETH_HLEN 18
247 /* Per-device statistics struct */
248 struct device_statistics {
250 rte_atomic64_t rx_total;
253 } __rte_cache_aligned;
254 struct device_statistics dev_statistics[MAX_DEVICES];
257 * Builds up the correct configuration for VMDQ VLAN pool map
258 * according to the pool & queue limits.
261 get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
263 struct rte_eth_vmdq_rx_conf conf;
266 memset(&conf, 0, sizeof(conf));
267 conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
268 conf.nb_pool_maps = num_devices;
270 for (i = 0; i < conf.nb_pool_maps; i++) {
271 conf.pool_map[i].vlan_id = vlan_tags[ i ];
272 conf.pool_map[i].pools = (1UL << i);
275 (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
276 (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_rx_conf, &conf,
277 sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
282 * Validate the device number according to the max pool number gotten form dev_info
283 * If the device number is invalid, give the error message and return -1.
284 * Each device must have its own pool.
/* NOTE(review): the 'static inline int' line, the 'return -1;' / 'return 0;'
 * statements and the braces are missing from this extraction; only the
 * over-limit check on the global num_devices is visible. */
287 validate_num_devices(uint32_t max_nb_devices)
289 if (num_devices > max_nb_devices) {
290 RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
297 * Initialises a given port using global settings and with the rx buffers
298 * coming from the mbuf_pool passed as parameter
/* Sequence visible below: query dev_info -> derive num_devices/num_queues from
 * VMDQ limits -> build port_conf -> configure -> set up RX/TX queues -> start
 * -> log the port MAC. Error-return lines between steps are missing from this
 * extraction. */
301 port_init(uint8_t port, struct rte_mempool *mbuf_pool)
303 struct rte_eth_dev_info dev_info;
304 struct rte_eth_conf port_conf;
/* One TX queue per lcore; RX queue count comes from dev_info below. */
305 uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
306 const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT, tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
310 /* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
311 rte_eth_dev_info_get (port, &dev_info);
313 /*configure the number of supported virtio devices based on VMDQ limits */
314 num_devices = dev_info.max_vmdq_pools;
315 num_queues = dev_info.max_rx_queues;
317 retval = validate_num_devices(MAX_DEVICES);
321 /* Get port configuration. */
322 retval = get_eth_conf(&port_conf, num_devices);
326 if (port >= rte_eth_dev_count()) return -1;
/* NOTE(review): trailing comma — this relies on the comma operator with the
 * next statement rather than an ordinary ';'. Confirm against upstream; if
 * not intentional it should be a semicolon. */
328 rx_rings = (uint16_t)num_queues,
329 /* Configure ethernet device. */
330 retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
334 /* Setup the queues. */
335 for (q = 0; q < rx_rings; q ++) {
336 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
337 rte_eth_dev_socket_id(port), &rx_conf_default,
342 for (q = 0; q < tx_rings; q ++) {
343 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
344 rte_eth_dev_socket_id(port), &tx_conf_default);
349 /* Start the device. */
350 retval = rte_eth_dev_start(port);
/* Cache the port MAC for later VMDQ registration / comparisons. */
354 rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
355 RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
356 RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
357 " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
359 vmdq_ports_eth_addr[port].addr_bytes[0],
360 vmdq_ports_eth_addr[port].addr_bytes[1],
361 vmdq_ports_eth_addr[port].addr_bytes[2],
362 vmdq_ports_eth_addr[port].addr_bytes[3],
363 vmdq_ports_eth_addr[port].addr_bytes[4],
364 vmdq_ports_eth_addr[port].addr_bytes[5]);
370 * Parse the portmask provided at run time.
/* parse_portmask: strtoul base-16 with full-string + errno validation; the
 * signature/return lines are missing from this extraction. */
373 parse_portmask(const char *portmask)
380 /* parse hexadecimal string */
381 pm = strtoul(portmask, &end, 16);
/* Reject empty input, trailing junk, and out-of-range values (errno). */
382 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
393 * Parse num options at run time.
/* parse_num_opt: same strtoul validation pattern, base 10, with an explicit
 * upper bound check against max_valid_value. */
396 parse_num_opt(const char *q_arg, uint32_t max_valid_value)
403 /* parse unsigned int string */
404 num = strtoul(q_arg, &end, 10);
405 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
408 if (num > max_valid_value)
/* us_vhost_usage: print command-line help via RTE_LOG. */
419 us_vhost_usage(const char *prgname)
421 RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK --vm2vm [0|1] --stats [0-N] --nb-devices ND\n"
422 " -p PORTMASK: Set mask for ports to be used by application\n"
423 " --vm2vm [0|1]: disable/enable(default) vm2vm comms\n"
424 " --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n",
429 * Parse the arguments given in the command line of the application.
/* Parses -p and the vm2vm/stats long options into the file-scope globals,
 * then expands enabled_port_mask into the ports[] array and validates the
 * count against MAX_SUP_PORTS. Switch/case scaffolding and return statements
 * are missing from this extraction. */
432 us_vhost_parse_args(int argc, char **argv)
437 const char *prgname = argv[0];
438 static struct option long_option[] = {
439 {"vm2vm", required_argument, NULL, 0},
440 {"stats", required_argument, NULL, 0},
444 /* Parse command line */
445 while ((opt = getopt_long(argc, argv, "p:",long_option, &option_index)) != EOF) {
449 enabled_port_mask = parse_portmask(optarg);
450 if (enabled_port_mask == 0) {
451 RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
452 us_vhost_usage(prgname);
458 /* Enable/disable vm2vm comms. */
459 if (!strncmp(long_option[option_index].name, "vm2vm", MAX_LONG_OPT_SZ)) {
460 ret = parse_num_opt(optarg, 1);
462 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for vm2vm [0|1]\n");
463 us_vhost_usage(prgname);
470 /* Enable/disable stats. */
471 if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
472 ret = parse_num_opt(optarg, INT32_MAX);
474 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
475 us_vhost_usage(prgname);
483 /* Invalid option - print options. */
485 us_vhost_usage(prgname);
/* Expand the bitmask into the ports[] array. */
490 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
491 if (enabled_port_mask & (1 << i))
492 ports[num_ports++] = (uint8_t)i;
495 if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
496 RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u,"
497 "but only %u port can be enabled\n",num_ports, MAX_SUP_PORTS);
505 * Update the global var NUM_PORTS and array PORTS according to system ports number
506 * and return valid ports number
508 static unsigned check_ports_num(unsigned nb_ports)
510 unsigned valid_num_ports = num_ports;
513 if (num_ports > nb_ports) {
514 RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
515 num_ports, nb_ports);
516 num_ports = nb_ports;
519 for (portid = 0; portid < num_ports; portid ++) {
520 if (ports[portid] >= nb_ports) {
521 RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
522 ports[portid], (nb_ports - 1));
523 ports[portid] = INVALID_PORT_ID;
527 return valid_num_ports;
531 * Macro to print out packet contents. Wrapped in debug define so that the
532 * data path is not effected when debug is disabled.
/* Debug build: hex-dump 'size' bytes at 'addr' into a stack buffer and emit
 * one LOG_DEBUG line; 'header' selects the "Header size"/"Packet size" prefix.
 * NOTE(review): the #ifdef guard lines and the if/else around the two leading
 * snprintf calls are missing from this extraction. */
535 #define PRINT_PACKET(device, addr, size, header) do { \
536 char *pkt_addr = (char*)(addr); \
537 unsigned int index; \
538 char packet[MAX_PRINT_BUFF]; \
541 snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Header size %d: ", (device->device_fh), (size)); \
543 snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Packet size %d: ", (device->device_fh), (size)); \
544 for (index = 0; index < (size); index++) { \
545 snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), \
546 "%02hhx ", pkt_addr[index]); \
548 snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), "\n"); \
550 LOG_DEBUG(VHOST_DATA, "%s", packet); \
/* Non-debug build: compiles to nothing. */
553 #define PRINT_PACKET(device, addr, size, header) do{} while(0)
557 * Function to convert guest physical addresses to vhost virtual addresses. This
558 * is used to convert virtio buffer addresses.
560 static inline uint64_t __attribute__((always_inline))
561 gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
563 struct virtio_memory_regions *region;
565 uint64_t vhost_va = 0;
567 for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
568 region = &dev->mem->regions[regionidx];
569 if ((guest_pa >= region->guest_phys_address) &&
570 (guest_pa <= region->guest_phys_address_end)) {
571 vhost_va = region->address_offset + guest_pa;
575 LOG_DEBUG(VHOST_DATA, "(%"PRIu64") GPA %p| VVA %p\n",
576 dev->device_fh, (void*)(uintptr_t)guest_pa, (void*)(uintptr_t)vhost_va);
582 * This function adds buffers to the virtio devices RX virtqueue. Buffers can
583 * be received from the physical port or from another virtio device. A packet
584 * count is returned to indicate the number of packets that were succesfully
585 * added to the RX queue.
/* Lockless multi-producer enqueue: a CAS on last_used_idx_res reserves a
 * contiguous slice of the ring, packets are copied in, then each core waits
 * its turn (last_used_idx == its base) before publishing used->idx. Several
 * loop/brace/return lines are missing from this extraction — do not assume
 * the visible lines are contiguous in the original. */
587 static inline uint32_t __attribute__((always_inline))
588 virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
590 struct vhost_virtqueue *vq;
591 struct vring_desc *desc;
592 struct rte_mbuf *buff;
593 /* The virtio_hdr is initialised to 0. */
594 struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0,0,0,0,0,0},0};
595 uint64_t buff_addr = 0;
596 uint64_t buff_hdr_addr = 0;
597 uint32_t head[MAX_PKT_BURST], packet_len = 0;
598 uint32_t head_idx, packet_success = 0;
599 uint16_t avail_idx, res_cur_idx;
600 uint16_t res_base_idx, res_end_idx;
601 uint16_t free_entries;
604 LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
605 vq = dev->virtqueue_rx;
606 count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
607 /* As many data cores may want access to available buffers, they need to be reserved. */
610 res_base_idx = vq->last_used_idx_res;
/* Volatile read: the guest updates avail->idx concurrently. */
612 avail_idx = *((volatile uint16_t *)&vq->avail->idx);
614 free_entries = (avail_idx - res_base_idx);
616 /*check that we have enough buffers*/
617 if (unlikely(count > free_entries))
618 count = free_entries;
623 res_end_idx = res_base_idx + count;
624 /* vq->last_used_idx_res is atomically updated. */
625 success = rte_atomic16_cmpset(&vq->last_used_idx_res, res_base_idx,
/* Retry the whole reservation if another core won the CAS. */
627 } while (unlikely(success == 0));
628 res_cur_idx = res_base_idx;
629 LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);
631 /* Prefetch available ring to retrieve indexes. */
632 rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
634 /* Retrieve all of the head indexes first to avoid caching issues. */
635 for (head_idx = 0; head_idx < count; head_idx++)
636 head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) & (vq->size - 1)];
638 /*Prefetch descriptor index. */
639 rte_prefetch0(&vq->desc[head[packet_success]]);
641 while (res_cur_idx != res_end_idx) {
642 /* Get descriptor from available ring */
643 desc = &vq->desc[head[packet_success]];
644 /* Prefetch descriptor address. */
647 buff = pkts[packet_success];
649 /* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */
650 buff_addr = gpa_to_vva(dev, desc->addr);
651 /* Prefetch buffer address. */
652 rte_prefetch0((void*)(uintptr_t)buff_addr);
655 /* Copy virtio_hdr to packet and increment buffer address */
656 buff_hdr_addr = buff_addr;
657 packet_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen;
660 * If the descriptors are chained the header and data are placed in
/* Chained case: header in first descriptor, payload in the next one. */
663 if (desc->flags & VRING_DESC_F_NEXT) {
664 desc->len = vq->vhost_hlen;
665 desc = &vq->desc[desc->next];
666 /* Buffer address translation. */
667 buff_addr = gpa_to_vva(dev, desc->addr);
668 desc->len = rte_pktmbuf_data_len(buff);
/* Unchained case (else branch): header and payload share one buffer. */
670 buff_addr += vq->vhost_hlen;
671 desc->len = packet_len;
675 /* Update used ring with desc information */
676 vq->used->ring[res_cur_idx & (vq->size - 1)].id = head[packet_success];
677 vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;
679 /* Copy mbuf data to buffer */
680 rte_memcpy((void *)(uintptr_t)buff_addr, (const void*)buff->data, rte_pktmbuf_data_len(buff));
685 /* mergeable is disabled then a header is required per buffer. */
686 rte_memcpy((void *)(uintptr_t)buff_hdr_addr, (const void*)&virtio_hdr, vq->vhost_hlen);
687 if (res_cur_idx < res_end_idx) {
688 /* Prefetch descriptor index. */
689 rte_prefetch0(&vq->desc[head[packet_success]]);
/* Make all buffer writes visible before publishing the used index. */
693 rte_compiler_barrier();
695 /* Wait until it's our turn to add our buffer to the used ring. */
696 while (unlikely(vq->last_used_idx != res_base_idx))
699 *(volatile uint16_t *)&vq->used->idx += count;
701 vq->last_used_idx = res_end_idx;
707 * Compares a packet destination MAC address to a device MAC address.
709 static inline int __attribute__((always_inline))
710 ether_addr_cmp(struct ether_addr *ea, struct ether_addr *eb)
712 return (((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0);
716 * This function registers mac along with a
717 * vlan tag to a VMDQ.
/* Walks the used-device list to detect MAC collisions, then derives the VLAN
 * tag and VMDQ RX queue from device_fh, registers the MAC with the NIC, and
 * marks the device ready. Signature/brace/return lines are missing from this
 * extraction. */
720 link_vmdq(struct virtio_net *dev)
723 struct virtio_net_data_ll *dev_ll;
725 dev_ll = ll_root_used;
727 while (dev_ll != NULL) {
/* Refuse to register a MAC another active device already uses. */
728 if ((dev != dev_ll->dev) && ether_addr_cmp(&dev->mac_address, &dev_ll->dev->mac_address)) {
729 RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") WARNING: This device is using an existing MAC address and has not been registered.\n", dev->device_fh);
732 dev_ll = dev_ll->next;
735 /* vlan_tag currently uses the device_id. */
736 dev->vlan_tag = vlan_tags[dev->device_fh];
/* Each pool owns (num_queues/num_devices) queues; take the pool's first. */
737 dev->vmdq_rx_q = dev->device_fh * (num_queues/num_devices);
739 /* Print out VMDQ registration info. */
740 RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\n",
742 dev->mac_address.addr_bytes[0], dev->mac_address.addr_bytes[1],
743 dev->mac_address.addr_bytes[2], dev->mac_address.addr_bytes[3],
744 dev->mac_address.addr_bytes[4], dev->mac_address.addr_bytes[5],
747 /* Register the MAC address. */
748 ret = rte_eth_dev_mac_addr_add(ports[0], &dev->mac_address, (uint32_t)dev->device_fh);
750 RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",
755 /* Enable stripping of the vlan tag as we handle routing. */
756 rte_eth_dev_set_vlan_strip_on_queue(ports[0], dev->vmdq_rx_q, 1);
/* Ensure all setup above is visible before flipping the ready flag. */
758 rte_compiler_barrier();
759 /* Set device as ready for RX. */
760 dev->ready = DEVICE_READY;
766 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
767 * queue before disabling RX on the device.
/* Clears the NIC MAC filter, zeroes the cached MAC, then drains and frees any
 * packets still pending on the device's VMDQ RX queue before marking the
 * device not-ready. Loop/brace lines are missing from this extraction. */
770 unlink_vmdq(struct virtio_net *dev)
774 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
776 if (dev->ready == DEVICE_READY) {
777 /*clear MAC and VLAN settings*/
778 rte_eth_dev_mac_addr_remove(ports[0], &dev->mac_address);
779 for (i = 0; i < 6; i++)
780 dev->mac_address.addr_bytes[i] = 0;
784 /*Clear out the receive buffers*/
785 rx_count = rte_eth_rx_burst(ports[0],
786 (uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
789 for (i = 0; i < rx_count; i++)
790 rte_pktmbuf_free(pkts_burst[i]);
/* Re-poll until the queue is empty (drain loop condition not visible here). */
792 rx_count = rte_eth_rx_burst(ports[0],
793 (uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
796 dev->ready = DEVICE_NOT_READY;
801 * Check if the packet destination MAC address is for a local device. If so then put
802 * the packet on that devices RX queue. If not then return.
/* Returns 0 when the packet was consumed locally (delivered or deliberately
 * dropped); a non-zero return tells the caller to send it out the physical
 * port. Several return/brace lines are missing from this extraction. */
804 static inline unsigned __attribute__((always_inline))
805 virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
807 struct virtio_net_data_ll *dev_ll;
808 struct ether_hdr *pkt_hdr;
811 pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
813 /*get the used devices list*/
814 dev_ll = ll_root_used;
816 while (dev_ll != NULL) {
/* Match the destination MAC against every ready local device. */
817 if (likely(dev_ll->dev->ready == DEVICE_READY) && ether_addr_cmp(&(pkt_hdr->d_addr),
818 &dev_ll->dev->mac_address)) {
820 /* Drop the packet if the TX packet is destined for the TX device. */
821 if (dev_ll->dev->device_fh == dev->device_fh) {
822 LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n",
823 dev_ll->dev->device_fh);
828 LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is local\n", dev_ll->dev->device_fh);
830 if (dev_ll->dev->remove) {
831 /*drop the packet if the device is marked for removal*/
832 LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Device is marked for removal\n", dev_ll->dev->device_fh);
834 /*send the packet to the local virtio device*/
835 ret = virtio_dev_rx(dev_ll->dev, &m, 1);
/* Account RX on the destination and TX on the source device. */
837 rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx_total, 1);
838 rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx, ret);
839 dev_statistics[dev->device_fh].tx_total++;
840 dev_statistics[dev->device_fh].tx += ret;
846 dev_ll = dev_ll->next;
853 * This function routes the TX packet to the correct interface. This may be a local device
854 * or the physical port.
/* If vm2vm delivery handles the packet, return early; otherwise copy it into
 * a fresh mbuf with an inserted 802.1Q tag and queue it on this lcore's TX
 * table, flushing via rte_eth_tx_burst() once MAX_PKT_BURST is reached.
 * Return/brace lines and the len/ret updates are partly missing from this
 * extraction. */
856 static inline void __attribute__((always_inline))
857 virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *mbuf_pool, uint16_t vlan_tag)
859 struct mbuf_table *tx_q;
860 struct vlan_ethhdr *vlan_hdr;
861 struct rte_mbuf **m_table;
862 struct rte_mbuf *mbuf;
864 const uint16_t lcore_id = rte_lcore_id();
866 /*check if destination is local VM*/
867 if (enable_vm2vm && (virtio_tx_local(dev, m) == 0)) {
871 LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh);
873 /*Add packet to the port tx queue*/
874 tx_q = &lcore_tx_queue[lcore_id];
877 /* Allocate an mbuf and populate the structure. */
878 mbuf = rte_pktmbuf_alloc(mbuf_pool);
/* New frame grows by the 4-byte VLAN header. */
882 mbuf->data_len = m->data_len + VLAN_HLEN;
883 mbuf->pkt_len = mbuf->data_len;
885 /* Copy ethernet header to mbuf. */
886 rte_memcpy(rte_pktmbuf_mtod(mbuf, void*),
887 rte_pktmbuf_mtod(m, const void*), ETH_HLEN);
890 /* Setup vlan header. Bytes need to be re-ordered for network with htons()*/
891 vlan_hdr = rte_pktmbuf_mtod(mbuf, struct vlan_ethhdr *);
892 vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;
893 vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
894 vlan_hdr->h_vlan_TCI = htons(vlan_tag);
896 /* Copy the remaining packet contents to the mbuf. */
897 rte_memcpy((void *)(rte_pktmbuf_mtod(mbuf, uint8_t *) + VLAN_ETH_HLEN),
898 (const void *)(rte_pktmbuf_mtod(m, uint8_t *) + ETH_HLEN),
899 (m->data_len - ETH_HLEN));
900 tx_q->m_table[len] = mbuf;
903 dev_statistics[dev->device_fh].tx_total++;
904 dev_statistics[dev->device_fh].tx++;
/* Flush the per-lcore queue when the burst table is full. */
907 if (unlikely(len == MAX_PKT_BURST)) {
908 m_table = (struct rte_mbuf **)tx_q->m_table;
909 ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id, m_table, (uint16_t) len);
910 /* Free any buffers not handled by TX and update the port stats. */
911 if (unlikely(ret < len)) {
913 rte_pktmbuf_free(m_table[ret]);
914 } while (++ret < len);
/* Dequeues up to MAX_PKT_BURST buffers from the guest's TX virtqueue, wraps
 * each payload in a stack "dummy" mbuf and hands it to virtio_tx_route(),
 * then advances the used ring. Declarations for m/i/used_idx/avail_idx and
 * several brace/return lines are missing from this extraction. */
924 static inline void __attribute__((always_inline))
925 virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
928 struct vhost_virtqueue *vq;
929 struct vring_desc *desc;
930 uint64_t buff_addr = 0;
931 uint32_t head[MAX_PKT_BURST];
934 uint16_t free_entries, packet_success = 0;
937 vq = dev->virtqueue_tx;
/* Volatile read: the guest updates avail->idx concurrently. */
938 avail_idx = *((volatile uint16_t *)&vq->avail->idx);
940 /* If there are no available buffers then return. */
941 if (vq->last_used_idx == avail_idx)
944 LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);
946 /* Prefetch available ring to retrieve head indexes. */
947 rte_prefetch0(&vq->avail->ring[vq->last_used_idx & (vq->size - 1)]);
949 /*get the number of free entries in the ring*/
950 free_entries = avail_idx - vq->last_used_idx;
951 free_entries = unlikely(free_entries < MAX_PKT_BURST) ? free_entries : MAX_PKT_BURST;
953 LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n", dev->device_fh, free_entries);
954 /* Retrieve all of the head indexes first to avoid caching issues. */
955 for (i = 0; i < free_entries; i++)
956 head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];
958 /* Prefetch descriptor index. */
959 rte_prefetch0(&vq->desc[head[packet_success]]);
961 while (packet_success < free_entries) {
962 desc = &vq->desc[head[packet_success]];
963 /* Prefetch descriptor address. */
966 if (packet_success < (free_entries - 1)) {
967 /* Prefetch descriptor index. */
968 rte_prefetch0(&vq->desc[head[packet_success+1]]);
971 /* Update used index buffer information. */
972 used_idx = vq->last_used_idx & (vq->size - 1);
973 vq->used->ring[used_idx].id = head[packet_success];
974 vq->used->ring[used_idx].len = 0;
976 /* Discard first buffer as it is the virtio header */
977 desc = &vq->desc[desc->next];
979 /* Buffer address translation. */
980 buff_addr = gpa_to_vva(dev, desc->addr);
981 /* Prefetch buffer address. */
982 rte_prefetch0((void*)(uintptr_t)buff_addr);
984 /* Setup dummy mbuf. This is copied to a real mbuf if transmitted out the physical port. */
985 m.data_len = desc->len;
989 virtio_tx_route(dev, &m, mbuf_pool, 0);
/* Publish all used-ring writes before bumping the used index. */
995 rte_compiler_barrier();
996 vq->used->idx += packet_success;
997 /* Kick guest if required. */
1001 * This function is called by each data core. It handles all RX/TX registered with the
1002 * core. For TX the specific lcore linked list is used. For RX, MAC addresses are compared
1003 * with all devices in the main linked list.
/* Main per-lcore forwarding loop: periodically drains the lcore TX burst
 * queue, acknowledges device-removal requests, then for every device on this
 * core's list polls its VMDQ RX queue into virtio and services its virtio TX
 * queue. The outer while(1), several declarations and brace lines are missing
 * from this extraction. */
1006 switch_worker(__attribute__((unused)) void *arg)
1008 struct rte_mempool *mbuf_pool = arg;
1009 struct virtio_net *dev = NULL;
1010 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1011 struct virtio_net_data_ll *dev_ll;
1012 struct mbuf_table *tx_q;
1013 volatile struct lcore_ll_info *lcore_ll;
/* TSC ticks corresponding to the ~100us TX drain interval. */
1014 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
1015 uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
1017 const uint16_t lcore_id = rte_lcore_id();
1018 const uint16_t num_cores = (uint16_t)rte_lcore_count();
1019 uint16_t rx_count = 0;
1021 RTE_LOG(INFO, VHOST_DATA, "Procesing on Core %u started \n", lcore_id);
1022 lcore_ll = lcore_info[lcore_id].lcore_ll;
1025 tx_q = &lcore_tx_queue[lcore_id];
1026 for (i = 0; i < num_cores; i ++) {
1027 if (lcore_ids[i] == lcore_id) {
1034 cur_tsc = rte_rdtsc();
1036 * TX burst queue drain
1038 diff_tsc = cur_tsc - prev_tsc;
1039 if (unlikely(diff_tsc > drain_tsc)) {
1042 LOG_DEBUG(VHOST_DATA, "TX queue drained after timeout with burst size %u \n", tx_q->len);
1044 /*Tx any packets in the queue*/
1045 ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,
1046 (struct rte_mbuf **)tx_q->m_table,
1047 (uint16_t)tx_q->len);
1048 if (unlikely(ret < tx_q->len)) {
1050 rte_pktmbuf_free(tx_q->m_table[ret]);
1051 } while (++ret < tx_q->len);
1062 * Inform the configuration core that we have exited the linked list and that no devices are
1063 * in use if requested.
1065 if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
1066 lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
1071 dev_ll = lcore_ll->ll_root_used;
1073 while (dev_ll != NULL) {
1074 /*get virtio device ID*/
/* Devices flagged for removal are skipped and marked safe to tear down. */
1077 if (unlikely(dev->remove)) {
1078 dev_ll = dev_ll->next;
1080 dev->ready = DEVICE_SAFE_REMOVE;
1083 if (likely(dev->ready == DEVICE_READY)) {
1085 rx_count = rte_eth_rx_burst(ports[0],
1086 (uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
1089 ret_count = virtio_dev_rx(dev, pkts_burst, rx_count);
1091 rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx_total, rx_count);
1092 rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx, ret_count);
/* Free any mbufs virtio_dev_rx() could not enqueue. */
1094 while (likely(rx_count)) {
1096 rte_pktmbuf_free_seg(pkts_burst[rx_count]);
1102 if (likely(!dev->remove))
1104 virtio_dev_tx(dev, mbuf_pool);
1106 /*move to the next device in the list*/
1107 dev_ll = dev_ll->next;
1115 * Add an entry to a used linked list. A free entry must first be found in the free linked list
1116 * using get_data_ll_free_entry();
/* add_data_ll_entry: append ll_dev at the tail of *ll_root_addr (or make it
 * the head when the list is empty — the empty-list branch is missing from
 * this extraction). */
1119 add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev)
1121 struct virtio_net_data_ll *ll = *ll_root_addr;
1123 /* Set next as NULL and use a compiler barrier to avoid reordering. */
1124 ll_dev->next = NULL;
1125 rte_compiler_barrier();
1127 /* If ll == NULL then this is the first device. */
1129 /* Increment to the tail of the linked list. */
1130 while ((ll->next != NULL) )
1135 *ll_root_addr = ll_dev;
1140 * Remove an entry from a used linked list. The entry must then be added to the free linked list
1141 * using put_data_ll_free_entry().
/* rm_data_ll_entry: unlink ll_dev; ll_dev_last is its predecessor, and the
 * head-removal path updates *ll_root_addr instead. The if/else around the
 * two visible assignments is missing from this extraction. */
1144 rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev, struct virtio_net_data_ll *ll_dev_last)
1146 struct virtio_net_data_ll *ll = *ll_root_addr;
1149 *ll_root_addr = ll_dev->next;
1151 ll_dev_last->next = ll_dev->next;
1155 * Find and return an entry from the free linked list.
/* get_data_ll_free_entry: pop the head of the free list; returns NULL when
 * the list is empty (the return statements are missing from this view). */
1157 static struct virtio_net_data_ll *
1158 get_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr)
1160 struct virtio_net_data_ll *ll_free = *ll_root_addr;
1161 struct virtio_net_data_ll *ll_dev;
1163 if (ll_free == NULL)
1167 *ll_root_addr = ll_free->next;
1173 * Place an entry back on to the free linked list.
1176 put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev)
1178 struct virtio_net_data_ll *ll_free = *ll_root_addr;
1180 ll_dev->next = ll_free;
1181 *ll_root_addr = ll_dev;
1185 * Creates a linked list of a given size.
1187 static struct virtio_net_data_ll *
1188 alloc_data_ll(uint32_t size)
1190 struct virtio_net_data_ll *ll_new;
1193 /* Malloc and then chain the linked list. */
1194 ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
1195 if (ll_new == NULL) {
1196 RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for ll_new.\n");
/* Chain each entry to its successor; entries start with no device attached.
 * NOTE(review): size == 0 would make (size - 1) wrap around as unsigned;
 * callers appear to always pass size >= 1 — confirm. */
1200 for (i = 0; i < size - 1; i++) {
1201 ll_new[i].dev = NULL;
1202 ll_new[i].next = &ll_new[i+1];
/* Terminate the list at the final entry (i == size - 1 after the loop). */
1204 ll_new[i].next = NULL;
1210 * Create the main linked list along with each individual cores linked list. A used and a free list
1211 * are created to manage entries.
/* (init_data_ll) Allocate per-lcore list bookkeeping plus the main free list.
 * NOTE(review): the function signature is not visible in this view — confirm. */
1218 RTE_LCORE_FOREACH_SLAVE(lcore) {
1219 lcore_info[lcore].lcore_ll = malloc(sizeof(struct lcore_ll_info));
1220 if (lcore_info[lcore].lcore_ll == NULL) {
1221 RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for lcore_ll.\n");
/* Fresh core list: no devices yet, removal handshake idle, used list empty. */
1225 lcore_info[lcore].lcore_ll->device_num = 0;
1226 lcore_info[lcore].lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
1227 lcore_info[lcore].lcore_ll->ll_root_used = NULL;
/* Round the per-core free-list size up when devices don't divide evenly
 * across switching cores, so no core runs out of entries. */
1228 if (num_devices % num_switching_cores)
1229 lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll((num_devices / num_switching_cores) + 1);
1231 lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll(num_devices / num_switching_cores);
1234 /* Allocate devices up to a maximum of MAX_DEVICES. */
1235 ll_root_free = alloc_data_ll(MIN((num_devices), MAX_DEVICES));
1240 * Remove a device from the specific data core linked list and from the main linked list. The
1241 * rx/tx thread must set the flag to indicate that it is safe to remove the device.
1245 destroy_device (volatile struct virtio_net *dev)
1247 struct virtio_net_data_ll *ll_lcore_dev_cur;
1248 struct virtio_net_data_ll *ll_main_dev_cur;
1249 struct virtio_net_data_ll *ll_lcore_dev_last = NULL;
1250 struct virtio_net_data_ll *ll_main_dev_last = NULL;
/* Mark the device as no longer running so data cores stop servicing it. */
1253 dev->flags &= ~VIRTIO_DEV_RUNNING;
1255 /* Set the remove flag. */
/* Busy-wait until the data path acknowledges it is safe to remove. */
1258 while(dev->ready != DEVICE_SAFE_REMOVE) {
1262 /* Search for entry to be removed from lcore ll */
1263 ll_lcore_dev_cur = lcore_info[dev->coreid].lcore_ll->ll_root_used;
1264 while (ll_lcore_dev_cur != NULL) {
1265 if (ll_lcore_dev_cur->dev == dev) {
/* Track the predecessor so rm_data_ll_entry() can unlink a mid-list node. */
1268 ll_lcore_dev_last = ll_lcore_dev_cur;
1269 ll_lcore_dev_cur = ll_lcore_dev_cur->next;
1273 /* Search for entry to be removed from main ll */
1274 ll_main_dev_cur = ll_root_used;
1275 ll_main_dev_last = NULL;
1276 while (ll_main_dev_cur != NULL) {
1277 if (ll_main_dev_cur->dev == dev) {
1280 ll_main_dev_last = ll_main_dev_cur;
1281 ll_main_dev_cur = ll_main_dev_cur->next;
1285 if (ll_lcore_dev_cur == NULL || ll_main_dev_cur == NULL) {
/* FIXME(review): message should read "could not find device". */
1286 RTE_LOG(ERR, XENHOST, "%s: could find device in per_cpu list or main_list\n", __func__);
1290 /* Remove entries from the lcore and main ll. */
1291 rm_data_ll_entry(&lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->ll_root_used, ll_lcore_dev_cur, ll_lcore_dev_last);
1292 rm_data_ll_entry(&ll_root_used, ll_main_dev_cur, ll_main_dev_last);
1294 /* Set the dev_removal_flag on each lcore. */
1295 RTE_LCORE_FOREACH_SLAVE(lcore) {
1296 lcore_info[lcore].lcore_ll->dev_removal_flag = REQUEST_DEV_REMOVAL;
1300 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL we can be sure that
1301 * they can no longer access the device removed from the linked lists and that the devices
1302 * are no longer in use.
1304 RTE_LCORE_FOREACH_SLAVE(lcore) {
1305 while (lcore_info[lcore].lcore_ll->dev_removal_flag != ACK_DEV_REMOVAL) {
1310 /* Add the entries back to the lcore and main free ll.*/
1311 put_data_ll_free_entry(&lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->ll_root_free, ll_lcore_dev_cur);
1312 put_data_ll_free_entry(&ll_root_free, ll_main_dev_cur);
1314 /* Decrement number of device on the lcore. */
1315 lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->device_num--;
1317 RTE_LOG(INFO, VHOST_DATA, " #####(%"PRIu64") Device has been removed from data core\n", dev->device_fh);
1321 * A new device is added to a data core. First the device is added to the main linked list
1322 * and then allocated to a specific data core.
1325 new_device (struct virtio_net *dev)
1327 struct virtio_net_data_ll *ll_dev;
1328 int lcore, core_add = 0;
1329 uint32_t device_num_min = num_devices;
1331 /* Add device to main ll */
1332 ll_dev = get_data_ll_free_entry(&ll_root_free);
1333 if (ll_dev == NULL) {
1334 RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") No free entry found in linked list. Device limit "
1335 "of %d devices per core has been reached\n",
1336 dev->device_fh, num_devices);
1340 add_data_ll_entry(&ll_root_used, ll_dev);
1342 /* Reset ready flag: device is not serviced until fully configured. */
1343 dev->ready = DEVICE_NOT_READY;
1346 /* Find a suitable lcore to add the device. */
/* NOTE(review): the assignment of core_add inside this loop is not visible
 * in this view; presumably core_add = lcore on a new minimum — confirm. */
1347 RTE_LCORE_FOREACH_SLAVE(lcore) {
1348 if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
1349 device_num_min = lcore_info[lcore].lcore_ll->device_num;
1353 /* Add device to lcore ll */
1354 ll_dev->dev->coreid = core_add;
1355 ll_dev = get_data_ll_free_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_free);
1356 if (ll_dev == NULL) {
1357 RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh);
/* Roll back the main-list insertion above before failing. */
1358 destroy_device(dev);
1362 add_data_ll_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_used, ll_dev);
1364 /* Initialize device stats */
1365 memset(&dev_statistics[dev->device_fh], 0, sizeof(struct device_statistics));
/* Mark the device live; its assigned data core will now service it. */
1367 lcore_info[ll_dev->dev->coreid].lcore_ll->device_num++;
1368 dev->flags |= VIRTIO_DEV_RUNNING;
1370 RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d\n", dev->device_fh, dev->coreid);
1378 * These callbacks allow devices to be added to the data core when configuration
1379 * has been fully completed.
1381 static const struct virtio_net_device_ops virtio_net_device_ops =
/* Callbacks invoked on device creation/teardown (see new_device/destroy_device). */
1383 .new_device = new_device,
1384 .destroy_device = destroy_device,
1388 * This is a thread that will wake up after a period to print stats if the user has
/* (print_stats) Periodically clears the screen and prints per-device RX/TX
 * counters. NOTE(review): the function signature is not visible in this view
 * — confirm it is the stats thread entry point. */
1394 struct virtio_net_data_ll *dev_ll;
1395 uint64_t tx_dropped, rx_dropped;
1396 uint64_t tx, tx_total, rx, rx_total;
/* ANSI escape sequences: clear screen and move cursor to top-left. */
1398 const char clr[] = { 27, '[', '2', 'J', '\0' };
1399 const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
/* enable_stats doubles as the refresh interval in seconds. */
1402 sleep(enable_stats);
1404 /* Clear screen and move to top left */
1405 printf("%s%s", clr, top_left);
1407 printf("\nDevice statistics ====================================");
/* Walk every in-use device; dropped = total - successful. */
1409 dev_ll = ll_root_used;
1410 while (dev_ll != NULL) {
1411 device_fh = (uint32_t)dev_ll->dev->device_fh;
1412 tx_total = dev_statistics[device_fh].tx_total;
1413 tx = dev_statistics[device_fh].tx;
1414 tx_dropped = tx_total - tx;
/* RX counters are updated concurrently by data cores; read them atomically. */
1415 rx_total = rte_atomic64_read(&dev_statistics[device_fh].rx_total);
1416 rx = rte_atomic64_read(&dev_statistics[device_fh].rx);
1417 rx_dropped = rx_total - rx;
1419 printf("\nStatistics for device %"PRIu32" ------------------------------"
1420 "\nTX total: %"PRIu64""
1421 "\nTX dropped: %"PRIu64""
1422 "\nTX successful: %"PRIu64""
1423 "\nRX total: %"PRIu64""
1424 "\nRX dropped: %"PRIu64""
1425 "\nRX successful: %"PRIu64"",
1434 dev_ll = dev_ll->next;
1436 printf("\n======================================================\n");
1441 int init_virtio_net(struct virtio_net_device_ops const * const ops);
1444 * Main function, does initialisation and calls the per-lcore functions. The CUSE
1445 * device is also registered here to handle the IOCTLs.
1448 MAIN(int argc, char *argv[])
1450 struct rte_mempool *mbuf_pool;
1451 unsigned lcore_id, core_id = 0;
1452 unsigned nb_ports, valid_num_ports;
1455 static pthread_t tid;
1458 ret = rte_eal_init(argc, argv);
1460 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
1464 /* parse app arguments */
1465 ret = us_vhost_parse_args(argc, argv);
1467 rte_exit(EXIT_FAILURE, "Invalid argument\n");
1469 if (rte_eal_pci_probe() != 0)
1470 rte_exit(EXIT_FAILURE, "Error with NIC driver initialization\n");
1472 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++)
1473 if (rte_lcore_is_enabled(lcore_id))
1474 lcore_ids[core_id ++] = lcore_id;
1476 if (rte_lcore_count() > RTE_MAX_LCORE)
1477 rte_exit(EXIT_FAILURE,"Not enough cores\n");
1479 /* set the number of switching cores available */
1480 num_switching_cores = rte_lcore_count()-1;
1482 /* Get the number of physical ports. */
1483 nb_ports = rte_eth_dev_count();
1484 if (nb_ports > RTE_MAX_ETHPORTS)
1485 nb_ports = RTE_MAX_ETHPORTS;
1488 * Update the global var NUM_PORTS and global array PORTS
1489 * and get value of var VALID_NUM_PORTS according to system ports number
1491 valid_num_ports = check_ports_num(nb_ports);
1493 if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
1494 RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u,"
1495 "but only %u port can be enabled\n",num_ports, MAX_SUP_PORTS);
1499 /* Create the mbuf pool. */
1500 mbuf_pool = rte_mempool_create("MBUF_POOL", NUM_MBUFS_PER_PORT * valid_num_ports,
1501 MBUF_SIZE, MBUF_CACHE_SIZE,
1502 sizeof(struct rte_pktmbuf_pool_private),
1503 rte_pktmbuf_pool_init, NULL,
1504 rte_pktmbuf_init, NULL,
1505 rte_socket_id(), 0);
1506 if (mbuf_pool == NULL)
1507 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
1509 /* Set log level. */
1510 rte_set_log_level(LOG_LEVEL);
1512 /* initialize all ports */
1513 for (portid = 0; portid < nb_ports; portid++) {
1514 /* skip ports that are not enabled */
1515 if ((enabled_port_mask & (1 << portid)) == 0) {
1516 RTE_LOG(INFO, VHOST_PORT, "Skipping disabled port %d\n", portid);
1519 if (port_init(portid, mbuf_pool) != 0)
1520 rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
1523 /* Initialise all linked lists. */
1524 if (init_data_ll() == -1)
1525 rte_exit(EXIT_FAILURE, "Failed to initialize linked list\n");
1527 /* Initialize device stats */
1528 memset(&dev_statistics, 0, sizeof(dev_statistics));
1530 /* Enable stats if the user option is set. */
1532 pthread_create(&tid, NULL, (void*)print_stats, NULL );
1534 /* Launch all data cores. */
1535 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
1536 rte_eal_remote_launch(switch_worker, mbuf_pool, lcore_id);
1539 init_virtio_xen(&virtio_net_device_ops);
1541 virtio_monitor_loop();