/*
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <sys/eventfd.h>
#include <sys/param.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "virtio-net.h"
#include "xen_vhost.h"
#define MAX_QUEUES 128

/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

/* Calculate the number of buffers needed per port. */
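/*
 * Worst-case sizing: every RX descriptor of every queue filled, plus one RX
 * burst, one full TX ring and one mempool cache worth of mbufs for each
 * switching core.
 */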
#define NUM_MBUFS_PER_PORT ((MAX_QUEUES * RTE_TEST_RX_DESC_DEFAULT) + \
			    (num_switching_cores * MAX_PKT_BURST) + \
			    (num_switching_cores * RTE_TEST_TX_DESC_DEFAULT) + \
			    (num_switching_cores * MBUF_CACHE_SIZE))
#define MBUF_CACHE_SIZE 64
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /* Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /* Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /* Default values of RX write-back threshold reg. */

/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /* Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0 /* Default values of TX host threshold reg. */
#define TX_WTHRESH 0 /* Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
#define MAX_MRG_PKT_BURST 16 /* Max burst for merge buffers. Set to 1 due to performance issue. */
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* State of virtio device. */
#define DEVICE_NOT_READY 0
#define DEVICE_READY 1
#define DEVICE_SAFE_REMOVE 2

/* Config_core_flag status definitions. */
#define REQUEST_DEV_REMOVAL 1
#define ACK_DEV_REMOVAL 0

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* Max number of devices. Limited by vmdq. */
#define MAX_DEVICES 64

/* Size of buffers used for snprintfs. */
#define MAX_PRINT_BUFF 6072

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64

/* Used to compare MAC addresses. */
#define MAC_ADDR_CMP 0xFFFFFFFFFFFF
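/*
 * ether_addr_cmp() below XORs the two addresses as 64-bit loads; this mask
 * keeps only 48 bits so the two bytes read beyond each 6-byte MAC address
 * cannot affect the comparison (little-endian layout assumed).
 */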
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Number of switching cores enabled. */
static uint32_t num_switching_cores = 0;

/* Number of devices/queues to support. */
static uint32_t num_queues = 0;
uint32_t num_devices = 0;

/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
static uint32_t enable_vm2vm = 1;

static uint32_t enable_stats = 0;

/* Empty VMDQ configuration structure. Filled in programmatically. */
static const struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		/*
		 * VLAN stripping is necessary for 1G NICs such as the I350;
		 * it fixes a bug where IPv4 forwarding in the guest could not
		 * forward packets from one virtio device to another.
		 */
		.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripped by hardware */
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
	.rx_adv_conf = {
		/*
		 * should be overridden separately in code with
		 * appropriate values
		 */
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.pool_map = {{0, 0},},
		},
	},
};
static unsigned lcore_ids[RTE_MAX_LCORE];
static uint8_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified in command line */

const uint16_t vlan_tags[] = {
	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
	1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};

/* Ethernet addresses of ports. */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

/* Heads for the main used and free linked lists for the data path. */
static struct virtio_net_data_ll *ll_root_used = NULL;
static struct virtio_net_data_ll *ll_root_free = NULL;

/* Array of data core structures containing information on individual core linked lists. */
static struct lcore_info lcore_info[RTE_MAX_LCORE];

/* Used for queueing bursts of TX packets. */
struct mbuf_table {
	unsigned len;
	unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];

/* VLAN header struct used to insert VLAN tags on TX. */
struct vlan_ethhdr {
	unsigned char h_dest[ETH_ALEN];
	unsigned char h_source[ETH_ALEN];
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
	__be16 h_vlan_encapsulated_proto;
};

/* Header lengths. */
#define VLAN_ETH_HLEN 18
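/*
 * VLAN_ETH_HLEN (18) = 6-byte destination MAC + 6-byte source MAC +
 * 4-byte 802.1Q tag (TPID + TCI) + 2-byte EtherType.
 */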
/* Per-device statistics struct */
struct device_statistics {
	uint64_t tx_total;
	rte_atomic64_t rx_total;
	uint64_t tx;
	rte_atomic64_t rx;
} __rte_cache_aligned;
struct device_statistics dev_statistics[MAX_DEVICES];

/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
	struct rte_eth_vmdq_rx_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
	conf.nb_pool_maps = num_devices;
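	/*
	 * Give each virtio device its own VMDQ pool: pool i receives frames
	 * tagged with vlan_tags[i], so the NIC demultiplexes guests by VLAN ID.
	 */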
	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << i);

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
			sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));

/*
 * Validate the device number against the max pool number obtained from dev_info.
 * If the device number is invalid, give the error message and return -1.
 * Each device must have its own pool.
 */
validate_num_devices(uint32_t max_nb_devices)
	if (num_devices > max_nb_devices) {
		RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");

/*
 * Initialises a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as parameter.
 */
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_conf port_conf;
	uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
	const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT, tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;

	/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
	rte_eth_dev_info_get(port, &dev_info);

	/* Configure the number of supported virtio devices based on VMDQ limits. */
	num_devices = dev_info.max_vmdq_pools;
	num_queues = dev_info.max_rx_queues;

	retval = validate_num_devices(MAX_DEVICES);

	/* Get port configuration. */
	retval = get_eth_conf(&port_conf, num_devices);

	if (port >= rte_eth_dev_count()) return -1;

	rx_rings = (uint16_t)num_queues;
	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);

	rte_eth_dev_info_get(port, &dev_info);
	rxconf = &dev_info.default_rxconf;
	rxconf->rx_drop_en = 1;
	/* Setup the queues. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
				rte_eth_dev_socket_id(port), rxconf,

	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
				rte_eth_dev_socket_id(port),

	/* Start the device. */
	retval = rte_eth_dev_start(port);

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);
/* Parse the portmask provided at run time. */
parse_portmask(const char *portmask)
	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))

/* Parse num options at run time. */
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))

	if (num > max_valid_value)

us_vhost_usage(const char *prgname)
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK --vm2vm [0|1] --stats [0-N] --nb-devices ND\n"
		" -p PORTMASK: Set mask for ports to be used by application\n"
		" --vm2vm [0|1]: disable/enable(default) vm2vm comms\n"
		" --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n",
/* Parse the arguments given in the command line of the application. */
us_vhost_parse_args(int argc, char **argv)
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"vm2vm", required_argument, NULL, 0},
		{"stats", required_argument, NULL, 0},

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:", long_option, &option_index)) != EOF) {
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
				us_vhost_usage(prgname);

			/* Enable/disable vm2vm comms. */
			if (!strncmp(long_option[option_index].name, "vm2vm", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for vm2vm [0|1]\n");
					us_vhost_usage(prgname);

			/* Enable/disable stats. */
			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
					us_vhost_usage(prgname);

		/* Invalid option - print options. */
			us_vhost_usage(prgname);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint8_t)i;

	if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);

/*
 * Update the global variable NUM_PORTS and the global array PORTS according
 * to the number of system ports, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
	unsigned valid_num_ports = num_ports;

	if (num_ports > nb_ports) {
		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;

	for (portid = 0; portid < num_ports; portid++) {
		if (ports[portid] >= nb_ports) {
			RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
				ports[portid], (nb_ports - 1));
			ports[portid] = INVALID_PORT_ID;

	return valid_num_ports;
/*
 * Macro to print out packet contents. Wrapped in debug define so that the
 * data path is not affected when debug is disabled.
 */
#define PRINT_PACKET(device, addr, size, header) do { \
	char *pkt_addr = (char *)(addr); \
	unsigned int index; \
	char packet[MAX_PRINT_BUFF]; \
	snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Header size %d: ", (device->device_fh), (size)); \
	snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Packet size %d: ", (device->device_fh), (size)); \
	for (index = 0; index < (size); index++) { \
		snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), \
			"%02hhx ", pkt_addr[index]); \
	snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), "\n"); \
	LOG_DEBUG(VHOST_DATA, "%s", packet); \

#define PRINT_PACKET(device, addr, size, header) do {} while (0)
/*
 * Function to convert guest physical addresses to vhost virtual addresses. This
 * is used to convert virtio buffer addresses.
 */
static inline uint64_t __attribute__((always_inline))
gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
	struct virtio_memory_regions *region;
	uint64_t vhost_va = 0;
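	/*
	 * Each region's address_offset is assumed to hold the difference between
	 * the host mapping of guest memory and the guest physical base, so a GPA
	 * inside the region translates with a single addition.
	 */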
	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
		region = &dev->mem->regions[regionidx];
		if ((guest_pa >= region->guest_phys_address) &&
			(guest_pa <= region->guest_phys_address_end)) {
			vhost_va = region->address_offset + guest_pa;

	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") GPA %p| VVA %p\n",
		dev->device_fh, (void *)(uintptr_t)guest_pa, (void *)(uintptr_t)vhost_va);

/*
 * This function adds buffers to the virtio device's RX virtqueue. Buffers can
 * be received from the physical port or from another virtio device. A packet
 * count is returned to indicate the number of packets that were successfully
 * added to the RX queue.
 */
static inline uint32_t __attribute__((always_inline))
virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
	struct vhost_virtqueue *vq;
	struct vring_desc *desc;
	struct rte_mbuf *buff;
	/* The virtio_hdr is initialised to 0. */
	struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
	uint64_t buff_addr = 0;
	uint64_t buff_hdr_addr = 0;
	uint32_t head[MAX_PKT_BURST], packet_len = 0;
	uint32_t head_idx, packet_success = 0;
	uint16_t avail_idx, res_cur_idx;
	uint16_t res_base_idx, res_end_idx;
	uint16_t free_entries;

	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
	vq = dev->virtqueue_rx;
	count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
	/* As many data cores may want access to available buffers, they need to be reserved. */
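	/*
	 * The reservation below is a compare-and-set on last_used_idx_res: each
	 * core claims the window [res_base_idx, res_end_idx) for itself, so
	 * several cores can enqueue to the same virtqueue without locking.
	 */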
		res_base_idx = vq->last_used_idx_res;

		avail_idx = *((volatile uint16_t *)&vq->avail->idx);

		free_entries = (avail_idx - res_base_idx);

		/* Check that we have enough buffers. */
		if (unlikely(count > free_entries))
			count = free_entries;

		res_end_idx = res_base_idx + count;
		/* vq->last_used_idx_res is atomically updated. */
		success = rte_atomic16_cmpset(&vq->last_used_idx_res, res_base_idx,
	} while (unlikely(success == 0));
	res_cur_idx = res_base_idx;
	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);

	/* Prefetch available ring to retrieve indexes. */
	rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);

	/* Retrieve all of the head indexes first to avoid caching issues. */
	for (head_idx = 0; head_idx < count; head_idx++)
		head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) & (vq->size - 1)];

	/* Prefetch descriptor index. */
	rte_prefetch0(&vq->desc[head[packet_success]]);

	while (res_cur_idx != res_end_idx) {
		/* Get descriptor from available ring */
		desc = &vq->desc[head[packet_success]];
		/* Prefetch descriptor address. */

		buff = pkts[packet_success];

		/* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */
		buff_addr = gpa_to_vva(dev, desc->addr);
		/* Prefetch buffer address. */
		rte_prefetch0((void *)(uintptr_t)buff_addr);

		/* Copy virtio_hdr to packet and increment buffer address */
		buff_hdr_addr = buff_addr;
		packet_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen;

		/*
		 * If the descriptors are chained the header and data are placed in
		 * separate buffers.
		 */
		if (desc->flags & VRING_DESC_F_NEXT) {
			desc->len = vq->vhost_hlen;
			desc = &vq->desc[desc->next];
			/* Buffer address translation. */
			buff_addr = gpa_to_vva(dev, desc->addr);
			desc->len = rte_pktmbuf_data_len(buff);

		buff_addr += vq->vhost_hlen;
		desc->len = packet_len;

		/* Update used ring with desc information */
		vq->used->ring[res_cur_idx & (vq->size - 1)].id = head[packet_success];
		vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;

		/* Copy mbuf data to buffer */
		rte_memcpy((void *)(uintptr_t)buff_addr, (const void *)buff->data, rte_pktmbuf_data_len(buff));

		/* If mergeable buffers are disabled then a header is required per buffer. */
		rte_memcpy((void *)(uintptr_t)buff_hdr_addr, (const void *)&virtio_hdr, vq->vhost_hlen);
		if (res_cur_idx < res_end_idx) {
			/* Prefetch descriptor index. */
			rte_prefetch0(&vq->desc[head[packet_success]]);

	rte_compiler_barrier();
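	/*
	 * The barrier above ensures the used-ring entries are written before the
	 * used index is published to the guest below.
	 */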
	/* Wait until it's our turn to add our buffer to the used ring. */
	while (unlikely(vq->last_used_idx != res_base_idx))

	*(volatile uint16_t *)&vq->used->idx += count;

	vq->last_used_idx = res_end_idx;

/* Compares a packet destination MAC address to a device MAC address. */
static inline int __attribute__((always_inline))
ether_addr_cmp(struct ether_addr *ea, struct ether_addr *eb)
	return (((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0);

/*
 * This function registers a MAC address along with a VLAN tag in a VMDQ pool.
 */
link_vmdq(struct virtio_net *dev)
	struct virtio_net_data_ll *dev_ll;

	dev_ll = ll_root_used;

	while (dev_ll != NULL) {
		if ((dev != dev_ll->dev) && ether_addr_cmp(&dev->mac_address, &dev_ll->dev->mac_address)) {
			RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") WARNING: This device is using an existing MAC address and has not been registered.\n", dev->device_fh);

		dev_ll = dev_ll->next;

	/* vlan_tag currently uses the device_id. */
	dev->vlan_tag = vlan_tags[dev->device_fh];
	dev->vmdq_rx_q = dev->device_fh * (num_queues / num_devices);
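	/*
	 * Each VMDQ pool owns num_queues/num_devices RX queues; the device reads
	 * from the first queue of its own pool.
	 */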
	/* Print out VMDQ registration info. */
	RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\n",
		dev->mac_address.addr_bytes[0], dev->mac_address.addr_bytes[1],
		dev->mac_address.addr_bytes[2], dev->mac_address.addr_bytes[3],
		dev->mac_address.addr_bytes[4], dev->mac_address.addr_bytes[5],

	/* Register the MAC address. */
	ret = rte_eth_dev_mac_addr_add(ports[0], &dev->mac_address, (uint32_t)dev->device_fh);
		RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",

	/* Enable stripping of the VLAN tag as we handle routing. */
	rte_eth_dev_set_vlan_strip_on_queue(ports[0], dev->vmdq_rx_q, 1);

	rte_compiler_barrier();
	/* Set device as ready for RX. */
	dev->ready = DEVICE_READY;

/*
 * Removes MAC address and VLAN tag from VMDQ. Ensures that nothing is adding buffers to the RX
 * queue before disabling RX on the device.
 */
unlink_vmdq(struct virtio_net *dev)
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	if (dev->ready == DEVICE_READY) {
		/* Clear MAC and VLAN settings. */
		rte_eth_dev_mac_addr_remove(ports[0], &dev->mac_address);
		for (i = 0; i < 6; i++)
			dev->mac_address.addr_bytes[i] = 0;

		/* Clear out the receive buffers. */
		rx_count = rte_eth_rx_burst(ports[0],
				(uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

			for (i = 0; i < rx_count; i++)
				rte_pktmbuf_free(pkts_burst[i]);

			rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

		dev->ready = DEVICE_NOT_READY;

/*
 * Check if the packet destination MAC address is for a local device. If so then put
 * the packet on that device's RX queue. If not then return.
 */
static inline unsigned __attribute__((always_inline))
virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
	struct virtio_net_data_ll *dev_ll;
	struct ether_hdr *pkt_hdr;

	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	/* Get the used devices list. */
	dev_ll = ll_root_used;

	while (dev_ll != NULL) {
		if (likely(dev_ll->dev->ready == DEVICE_READY) && ether_addr_cmp(&(pkt_hdr->d_addr),
			&dev_ll->dev->mac_address)) {

			/* Drop the packet if the TX packet is destined for the TX device. */
			if (dev_ll->dev->device_fh == dev->device_fh) {
				LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n",
					dev_ll->dev->device_fh);

			LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is local\n", dev_ll->dev->device_fh);

			if (dev_ll->dev->remove) {
				/* Drop the packet if the device is marked for removal. */
				LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Device is marked for removal\n", dev_ll->dev->device_fh);

			/* Send the packet to the local virtio device. */
			ret = virtio_dev_rx(dev_ll->dev, &m, 1);
				rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx_total, 1);
				rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx, ret);
				dev_statistics[dev->device_fh].tx_total++;
				dev_statistics[dev->device_fh].tx += ret;

		dev_ll = dev_ll->next;

/*
 * This function routes the TX packet to the correct interface. This may be a local device
 * or the physical port.
 */
static inline void __attribute__((always_inline))
virtio_tx_route(struct virtio_net *dev, struct rte_mbuf *m, struct rte_mempool *mbuf_pool, uint16_t vlan_tag)
	struct mbuf_table *tx_q;
	struct vlan_ethhdr *vlan_hdr;
	struct rte_mbuf **m_table;
	struct rte_mbuf *mbuf;
	const uint16_t lcore_id = rte_lcore_id();

	/* Check if destination is a local VM. */
	if (enable_vm2vm && (virtio_tx_local(dev, m) == 0)) {

	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh);

	/* Add packet to the port TX queue. */
	tx_q = &lcore_tx_queue[lcore_id];

	/* Allocate an mbuf and populate the structure. */
	mbuf = rte_pktmbuf_alloc(mbuf_pool);

	mbuf->data_len = m->data_len + VLAN_HLEN;
	mbuf->pkt_len = mbuf->data_len;
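	/*
	 * The frame grows by VLAN_HLEN (4 bytes): the original Ethernet header is
	 * copied first, the 802.1Q tag is written in place, and the rest of the
	 * payload is appended after the enlarged header.
	 */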
	/* Copy ethernet header to mbuf. */
	rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
		rte_pktmbuf_mtod(m, const void *), ETH_HLEN);

	/* Set up the VLAN header. Multi-byte fields are converted to network byte order with htons(). */
	vlan_hdr = rte_pktmbuf_mtod(mbuf, struct vlan_ethhdr *);
	vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;
	vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
	vlan_hdr->h_vlan_TCI = htons(vlan_tag);

	/* Copy the remaining packet contents to the mbuf. */
	rte_memcpy((void *)(rte_pktmbuf_mtod(mbuf, uint8_t *) + VLAN_ETH_HLEN),
		(const void *)(rte_pktmbuf_mtod(m, uint8_t *) + ETH_HLEN),
		(m->data_len - ETH_HLEN));
	tx_q->m_table[len] = mbuf;

	dev_statistics[dev->device_fh].tx_total++;
	dev_statistics[dev->device_fh].tx++;

	if (unlikely(len == MAX_PKT_BURST)) {
		m_table = (struct rte_mbuf **)tx_q->m_table;
		ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id, m_table, (uint16_t)len);
		/* Free any buffers not handled by TX and update the port stats. */
		if (unlikely(ret < len)) {
				rte_pktmbuf_free(m_table[ret]);
			} while (++ret < len);
static inline void __attribute__((always_inline))
virtio_dev_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
	struct vhost_virtqueue *vq;
	struct vring_desc *desc;
	uint64_t buff_addr = 0;
	uint32_t head[MAX_PKT_BURST];
	uint16_t free_entries, packet_success = 0;

	vq = dev->virtqueue_tx;
	avail_idx = *((volatile uint16_t *)&vq->avail->idx);

	/* If there are no available buffers then return. */
	if (vq->last_used_idx == avail_idx)

	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);

	/* Prefetch available ring to retrieve head indexes. */
	rte_prefetch0(&vq->avail->ring[vq->last_used_idx & (vq->size - 1)]);

	/* Get the number of free entries in the ring. */
	free_entries = avail_idx - vq->last_used_idx;
	free_entries = unlikely(free_entries < MAX_PKT_BURST) ? free_entries : MAX_PKT_BURST;

	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n", dev->device_fh, free_entries);
	/* Retrieve all of the head indexes first to avoid caching issues. */
	for (i = 0; i < free_entries; i++)
		head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];

	/* Prefetch descriptor index. */
	rte_prefetch0(&vq->desc[head[packet_success]]);

	while (packet_success < free_entries) {
		desc = &vq->desc[head[packet_success]];
		/* Prefetch descriptor address. */

		if (packet_success < (free_entries - 1)) {
			/* Prefetch descriptor index. */
			rte_prefetch0(&vq->desc[head[packet_success + 1]]);

		/* Update used index buffer information. */
		used_idx = vq->last_used_idx & (vq->size - 1);
		vq->used->ring[used_idx].id = head[packet_success];
		vq->used->ring[used_idx].len = 0;

		/* Discard first buffer as it is the virtio header */
		desc = &vq->desc[desc->next];

		/* Buffer address translation. */
		buff_addr = gpa_to_vva(dev, desc->addr);
		/* Prefetch buffer address. */
		rte_prefetch0((void *)(uintptr_t)buff_addr);

		/* Setup dummy mbuf. This is copied to a real mbuf if transmitted out the physical port. */
		m.data_len = desc->len;

		virtio_tx_route(dev, &m, mbuf_pool, 0);

	rte_compiler_barrier();
	vq->used->idx += packet_success;
	/* Kick guest if required. */
/*
 * This function is called by each data core. It handles all RX/TX registered with the
 * core. For TX the specific lcore linked list is used. For RX, MAC addresses are compared
 * with all devices in the main linked list.
 */
switch_worker(void *arg)
	struct rte_mempool *mbuf_pool = arg;
	struct virtio_net *dev = NULL;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct virtio_net_data_ll *dev_ll;
	struct mbuf_table *tx_q;
	volatile struct lcore_ll_info *lcore_ll;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
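	/*
	 * drain_tsc converts BURST_TX_DRAIN_US into TSC cycles (cycles per
	 * microsecond rounded up), so queued TX packets are flushed roughly every
	 * 100us even when a burst never fills.
	 */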
	uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
	const uint16_t lcore_id = rte_lcore_id();
	const uint16_t num_cores = (uint16_t)rte_lcore_count();
	uint16_t rx_count = 0;

	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
	lcore_ll = lcore_info[lcore_id].lcore_ll;

	tx_q = &lcore_tx_queue[lcore_id];
	for (i = 0; i < num_cores; i++) {
		if (lcore_ids[i] == lcore_id) {

		cur_tsc = rte_rdtsc();

		/* TX burst queue drain */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			LOG_DEBUG(VHOST_DATA, "TX queue drained after timeout with burst size %u\n", tx_q->len);

			/* TX any packets in the queue. */
			ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,
				(struct rte_mbuf **)tx_q->m_table,
				(uint16_t)tx_q->len);
			if (unlikely(ret < tx_q->len)) {
					rte_pktmbuf_free(tx_q->m_table[ret]);
				} while (++ret < tx_q->len);
		/*
		 * Inform the configuration core that we have exited the linked list and that no devices are
		 * in use if requested.
		 */
		if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
			lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;

		dev_ll = lcore_ll->ll_root_used;

		while (dev_ll != NULL) {
			/* Get virtio device ID. */

			if (unlikely(dev->remove)) {
				dev_ll = dev_ll->next;
				dev->ready = DEVICE_SAFE_REMOVE;

			if (likely(dev->ready == DEVICE_READY)) {
				rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

					ret_count = virtio_dev_rx(dev, pkts_burst, rx_count);
					rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx_total, rx_count);
					rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx, ret_count);

					while (likely(rx_count)) {
						rte_pktmbuf_free_seg(pkts_burst[rx_count]);

			if (likely(!dev->remove))
				virtio_dev_tx(dev, mbuf_pool);

			/* Move to the next device in the list. */
			dev_ll = dev_ll->next;
/*
 * Add an entry to a used linked list. A free entry must first be found in the free linked list
 * using get_data_ll_free_entry().
 */
add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev)
	struct virtio_net_data_ll *ll = *ll_root_addr;

	/* Set next as NULL and use a compiler barrier to avoid reordering. */
	ll_dev->next = NULL;
	rte_compiler_barrier();

	/* If ll == NULL then this is the first device. */
		/* Increment to the tail of the linked list. */
		while ((ll->next != NULL))

		*ll_root_addr = ll_dev;

/*
 * Remove an entry from a used linked list. The entry must then be added to the free linked list
 * using put_data_ll_free_entry().
 */
rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev, struct virtio_net_data_ll *ll_dev_last)
	struct virtio_net_data_ll *ll = *ll_root_addr;

		*ll_root_addr = ll_dev->next;

		ll_dev_last->next = ll_dev->next;

/* Find and return an entry from the free linked list. */
static struct virtio_net_data_ll *
get_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr)
	struct virtio_net_data_ll *ll_free = *ll_root_addr;
	struct virtio_net_data_ll *ll_dev;

	if (ll_free == NULL)

	*ll_root_addr = ll_free->next;

/* Place an entry back on to the free linked list. */
put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev)
	struct virtio_net_data_ll *ll_free = *ll_root_addr;

	ll_dev->next = ll_free;
	*ll_root_addr = ll_dev;

/* Creates a linked list of a given size. */
static struct virtio_net_data_ll *
alloc_data_ll(uint32_t size)
	struct virtio_net_data_ll *ll_new;

	/* Malloc and then chain the linked list. */
	ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
	if (ll_new == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for ll_new.\n");

	for (i = 0; i < size - 1; i++) {
		ll_new[i].dev = NULL;
		ll_new[i].next = &ll_new[i+1];
	ll_new[i].next = NULL;

/*
 * Create the main linked list along with each individual core's linked list. A used and a free list
 * are created to manage entries.
 */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		lcore_info[lcore].lcore_ll = malloc(sizeof(struct lcore_ll_info));
		if (lcore_info[lcore].lcore_ll == NULL) {
			RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for lcore_ll.\n");

		lcore_info[lcore].lcore_ll->device_num = 0;
		lcore_info[lcore].lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
		lcore_info[lcore].lcore_ll->ll_root_used = NULL;
		if (num_devices % num_switching_cores)
			lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll((num_devices / num_switching_cores) + 1);
		else
			lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll(num_devices / num_switching_cores);
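		/*
		 * When num_devices does not divide evenly across the switching
		 * cores, each per-core free list is rounded up by one entry so no
		 * core runs out of entries before the global device limit is hit.
		 */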
	/* Allocate devices up to a maximum of MAX_DEVICES. */
	ll_root_free = alloc_data_ll(MIN((num_devices), MAX_DEVICES));

/*
 * Remove a device from the specific data core linked list and from the main linked list. The
 * rx/tx thread must set the flag to indicate that it is safe to remove the device.
 */
destroy_device(volatile struct virtio_net *dev)
	struct virtio_net_data_ll *ll_lcore_dev_cur;
	struct virtio_net_data_ll *ll_main_dev_cur;
	struct virtio_net_data_ll *ll_lcore_dev_last = NULL;
	struct virtio_net_data_ll *ll_main_dev_last = NULL;

	dev->flags &= ~VIRTIO_DEV_RUNNING;

	/* Set the remove flag. */
	while (dev->ready != DEVICE_SAFE_REMOVE) {

	/* Search for entry to be removed from lcore ll */
	ll_lcore_dev_cur = lcore_info[dev->coreid].lcore_ll->ll_root_used;
	while (ll_lcore_dev_cur != NULL) {
		if (ll_lcore_dev_cur->dev == dev) {

			ll_lcore_dev_last = ll_lcore_dev_cur;
			ll_lcore_dev_cur = ll_lcore_dev_cur->next;

	/* Search for entry to be removed from main ll */
	ll_main_dev_cur = ll_root_used;
	ll_main_dev_last = NULL;
	while (ll_main_dev_cur != NULL) {
		if (ll_main_dev_cur->dev == dev) {

			ll_main_dev_last = ll_main_dev_cur;
			ll_main_dev_cur = ll_main_dev_cur->next;

	if (ll_lcore_dev_cur == NULL || ll_main_dev_cur == NULL) {
		RTE_LOG(ERR, XENHOST, "%s: could not find device in per_cpu list or main_list\n", __func__);

	/* Remove entries from the lcore and main ll. */
	rm_data_ll_entry(&lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->ll_root_used, ll_lcore_dev_cur, ll_lcore_dev_last);
	rm_data_ll_entry(&ll_root_used, ll_main_dev_cur, ll_main_dev_last);

	/* Set the dev_removal_flag on each lcore. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		lcore_info[lcore].lcore_ll->dev_removal_flag = REQUEST_DEV_REMOVAL;

	/*
	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL we can be sure that
	 * they can no longer access the device removed from the linked lists and that the devices
	 * are no longer in use.
	 */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		while (lcore_info[lcore].lcore_ll->dev_removal_flag != ACK_DEV_REMOVAL) {

	/* Add the entries back to the lcore and main free ll. */
	put_data_ll_free_entry(&lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->ll_root_free, ll_lcore_dev_cur);
	put_data_ll_free_entry(&ll_root_free, ll_main_dev_cur);

	/* Decrement the number of devices on the lcore. */
	lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->device_num--;

	RTE_LOG(INFO, VHOST_DATA, " #####(%"PRIu64") Device has been removed from data core\n", dev->device_fh);
/*
 * A new device is added to a data core. First the device is added to the main linked list
 * and then allocated to a specific data core.
 */
new_device(struct virtio_net *dev)
	struct virtio_net_data_ll *ll_dev;
	int lcore, core_add = 0;
	uint32_t device_num_min = num_devices;

	/* Add device to main ll */
	ll_dev = get_data_ll_free_entry(&ll_root_free);
	if (ll_dev == NULL) {
		RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") No free entry found in linked list. Device limit "
			"of %d devices per core has been reached\n",
			dev->device_fh, num_devices);

	add_data_ll_entry(&ll_root_used, ll_dev);

	/* Reset ready flag. */
	dev->ready = DEVICE_NOT_READY;

	/* Find a suitable lcore to add the device. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
			device_num_min = lcore_info[lcore].lcore_ll->device_num;

	/* Add device to lcore ll */
	ll_dev->dev->coreid = core_add;
	ll_dev = get_data_ll_free_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_free);
	if (ll_dev == NULL) {
		RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh);
		destroy_device(dev);

	add_data_ll_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_used, ll_dev);

	/* Initialize device stats */
	memset(&dev_statistics[dev->device_fh], 0, sizeof(struct device_statistics));

	lcore_info[ll_dev->dev->coreid].lcore_ll->device_num++;
	dev->flags |= VIRTIO_DEV_RUNNING;

	RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d\n", dev->device_fh, dev->coreid);
/*
 * These callbacks allow devices to be added to the data core when configuration
 * has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
	.new_device = new_device,
	.destroy_device = destroy_device,
};

/*
 * This is a thread that will wake up after a period to print stats if the user
 * has enabled them.
 */
	struct virtio_net_data_ll *dev_ll;
	uint64_t tx_dropped, rx_dropped;
	uint64_t tx, tx_total, rx, rx_total;

	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
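	/* The arrays above are ANSI escape sequences: ESC[2J clears the screen and ESC[1;1H homes the cursor. */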
		sleep(enable_stats);

		/* Clear screen and move to top left */
		printf("%s%s", clr, top_left);

		printf("\nDevice statistics ====================================");

		dev_ll = ll_root_used;
		while (dev_ll != NULL) {
			device_fh = (uint32_t)dev_ll->dev->device_fh;
			tx_total = dev_statistics[device_fh].tx_total;
			tx = dev_statistics[device_fh].tx;
			tx_dropped = tx_total - tx;
			rx_total = rte_atomic64_read(&dev_statistics[device_fh].rx_total);
			rx = rte_atomic64_read(&dev_statistics[device_fh].rx);
			rx_dropped = rx_total - rx;

			printf("\nStatistics for device %"PRIu32" ------------------------------"
				"\nTX total: %"PRIu64""
				"\nTX dropped: %"PRIu64""
				"\nTX successful: %"PRIu64""
				"\nRX total: %"PRIu64""
				"\nRX dropped: %"PRIu64""
				"\nRX successful: %"PRIu64"",

			dev_ll = dev_ll->next;

		printf("\n======================================================\n");

int init_virtio_net(struct virtio_net_device_ops const * const ops);

/*
 * Main function, does initialisation and calls the per-lcore functions. The CUSE
 * device is also registered here to handle the IOCTLs.
 */
main(int argc, char *argv[])
	struct rte_mempool *mbuf_pool;
	unsigned lcore_id, core_id = 0;
	unsigned nb_ports, valid_num_ports;

	static pthread_t tid;

	ret = rte_eal_init(argc, argv);
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

	/* parse app arguments */
	ret = us_vhost_parse_args(argc, argv);
		rte_exit(EXIT_FAILURE, "Invalid argument\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;

	if (rte_lcore_count() > RTE_MAX_LCORE)
		rte_exit(EXIT_FAILURE, "Not enough cores\n");

	/* Set the number of switching cores available. */
	num_switching_cores = rte_lcore_count() - 1;
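	/*
	 * One lcore (the master) is kept for configuration and the Xen monitor
	 * loop; the remaining lcores are launched as switching cores below.
	 */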
	/* Get the number of physical ports. */
	nb_ports = rte_eth_dev_count();
	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	/*
	 * Update the global variable NUM_PORTS and the global array PORTS,
	 * and get the value of VALID_NUM_PORTS according to the number of system ports.
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);

	/* Create the mbuf pool. */
	mbuf_pool = rte_mempool_create("MBUF_POOL", NUM_MBUFS_PER_PORT * valid_num_ports,
		MBUF_SIZE, MBUF_CACHE_SIZE,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		rte_socket_id(), 0);
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* Set log level. */
	rte_set_log_level(LOG_LEVEL);

	/* Initialize all ports. */
	for (portid = 0; portid < nb_ports; portid++) {
		/* Skip ports that are not enabled. */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			RTE_LOG(INFO, VHOST_PORT, "Skipping disabled port %d\n", portid);

		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");

	/* Initialise all linked lists. */
	if (init_data_ll() == -1)
		rte_exit(EXIT_FAILURE, "Failed to initialize linked list\n");

	/* Initialize device stats */
	memset(&dev_statistics, 0, sizeof(dev_statistics));

	/* Enable stats if the user option is set. */
		pthread_create(&tid, NULL, (void *)print_stats, NULL);

	/* Launch all data cores. */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		rte_eal_remote_launch(switch_worker, mbuf_pool, lcore_id);

	init_virtio_xen(&virtio_net_device_ops);

	virtio_monitor_loop();