/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <arpa/inet.h>
#include <errno.h>
#include <getopt.h>
#include <inttypes.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/param.h>
#include <unistd.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>

#include "main.h"
#include "virtio-net.h"
#include "xen_vhost.h"
#define MAX_QUEUES 128

/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

/*
 * Calculate the number of buffers needed per port
 */
#define NUM_MBUFS_PER_PORT ((MAX_QUEUES*RTE_TEST_RX_DESC_DEFAULT) +		\
				(num_switching_cores*MAX_PKT_BURST) +		\
				(num_switching_cores*RTE_TEST_TX_DESC_DEFAULT) +\
				(num_switching_cores*MBUF_CACHE_SIZE))
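/*
 * The count above budgets, per port: one mbuf for every RX descriptor of
 * every queue, plus per switching core an in-flight TX burst, packets parked
 * in TX descriptors, and the mempool cache.
 */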
#define MBUF_CACHE_SIZE 64
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /* Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /* Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /* Default values of RX write-back threshold reg. */

/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /* Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /* Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /* Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32	/* Max burst size for RX/TX */
#define MAX_MRG_PKT_BURST 16	/* Max burst for merge buffers. */
#define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */
/* State of virtio device. */
#define DEVICE_NOT_READY	0
#define DEVICE_READY		1
#define DEVICE_SAFE_REMOVE	2

/* Config_core_flag status definitions. */
#define REQUEST_DEV_REMOVAL 1
#define ACK_DEV_REMOVAL 0

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* Max number of devices. Limited by vmdq. */
#define MAX_DEVICES 64

/* Size of buffers used for rte_snprintf. */
#define MAX_PRINT_BUFF 6072

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64

/* Used to compare MAC addresses. */
#define MAC_ADDR_CMP 0xFFFFFFFFFFFF
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Number of switching cores enabled. */
static uint32_t num_switching_cores = 0;

/* number of devices/queues to support */
static uint32_t num_queues = 0;
uint32_t num_devices = 0;

/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
static uint32_t enable_vm2vm = 1;
/* Enable stats. */
static uint32_t enable_stats = 0;
/* Default configuration for rx and tx thresholds etc. */
static const struct rte_eth_rxconf rx_conf_default = {
	.rx_thresh = {
		.pthresh = RX_PTHRESH,
		.hthresh = RX_HTHRESH,
		.wthresh = RX_WTHRESH,
	},
};
/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe/igb PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
static const struct rte_eth_txconf tx_conf_default = {
	.tx_thresh = {
		.pthresh = TX_PTHRESH,
		.hthresh = TX_HTHRESH,
		.wthresh = TX_WTHRESH,
	},
	.tx_free_thresh = 0, /* Use PMD default values */
	.tx_rs_thresh = 0, /* Use PMD default values */
};
/* empty vmdq configuration structure. Filled in programmatically */
static const struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		/*
		 * VLAN strip is necessary for 1G NICs such as the I350;
		 * it works around a bug where IPv4 forwarding in the guest
		 * could not forward packets from one virtio dev to another.
		 */
		.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripped by hardware */
	},

	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
	.rx_adv_conf = {
		/*
		 * The pool count and maps below are placeholders; they
		 * should be overridden separately in code with the values
		 * computed at run time (see get_eth_conf()).
		 */
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
};
static unsigned lcore_ids[RTE_MAX_LCORE];
static uint8_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified in command line */
const uint16_t vlan_tags[] = {
	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
	1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};
/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

/* heads for the main used and free linked lists for the data path. */
static struct virtio_net_data_ll *ll_root_used = NULL;
static struct virtio_net_data_ll *ll_root_free = NULL;

/* Array of data core structures containing information on individual core linked lists. */
static struct lcore_info lcore_info[RTE_MAX_LCORE];

/* Used for queueing bursts of TX packets. */
struct mbuf_table {
	unsigned len;
	unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
/* Vlan header struct used to insert vlan tags on TX. */
struct vlan_ethhdr {
	unsigned char h_dest[ETH_ALEN];
	unsigned char h_source[ETH_ALEN];
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
	__be16 h_vlan_encapsulated_proto;
};

/* Header lengths. */
#define VLAN_HLEN	4
#define VLAN_ETH_HLEN	18
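/*
 * VLAN_ETH_HLEN is the standard 14-byte Ethernet header (two 6-byte MACs
 * plus the 2-byte EtherType) extended by the 4-byte 802.1Q tag: 14 + 4 = 18.
 */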
/* Per-device statistics struct */
struct device_statistics {
	uint64_t tx_total;
	rte_atomic64_t rx_total;
	uint64_t tx;
	rte_atomic64_t rx;
} __rte_cache_aligned;
struct device_statistics dev_statistics[MAX_DEVICES];
/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
	struct rte_eth_vmdq_rx_conf conf;
	unsigned i;

	memset(&conf, 0, sizeof(conf));
	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
	conf.nb_pool_maps = num_devices;

	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << i);
	}

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));

	return 0;
}
/*
 * Validate the device number according to the max pool number obtained from
 * dev_info. If the device number is invalid, print an error message and
 * return -1. Each device must have its own pool.
 */
static inline int
validate_num_devices(uint32_t max_nb_devices)
{
	if (num_devices > max_nb_devices) {
		RTE_LOG(ERR, PORT, "invalid number of devices\n");
		return -1;
	}
	return 0;
}
/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
	const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT, tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;

	/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
	rte_eth_dev_info_get(port, &dev_info);

	/* Configure the number of supported virtio devices based on VMDQ limits. */
	num_devices = dev_info.max_vmdq_pools;
	num_queues = dev_info.max_rx_queues;

	retval = validate_num_devices(MAX_DEVICES);
	if (retval < 0)
		return retval;

	/* Get port configuration. */
	retval = get_eth_conf(&port_conf, num_devices);
	if (retval < 0)
		return retval;

	if (port >= rte_eth_dev_count())
		return -1;

	rx_rings = (uint16_t)num_queues;

	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Setup the queues. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
				rte_eth_dev_socket_id(port), &rx_conf_default,
				mbuf_pool);
		if (retval < 0)
			return retval;
	}
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
				rte_eth_dev_socket_id(port), &tx_conf_default);
		if (retval < 0)
			return retval;
	}

	/* Start the device. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	RTE_LOG(INFO, PORT, "Max virtio devices supported: %u\n", num_devices);
	RTE_LOG(INFO, PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	return 0;
}
/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	errno = 0;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}
/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
	char *end = NULL;
	unsigned long num;

	errno = 0;

	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	if (num > max_valid_value)
		return -1;

	return num;
}
/*
 * Display usage
 */
static void
us_vhost_usage(const char *prgname)
{
	RTE_LOG(INFO, CONFIG, "%s [EAL options] -- -p PORTMASK --vm2vm [0|1] --stats [0-N] --nb-devices ND\n"
	"	-p PORTMASK: Set mask for ports to be used by application\n"
	"	--vm2vm [0|1]: disable/enable(default) vm2vm comms\n"
	"	--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n",
	       prgname);
}
/*
 * Parse the arguments given in the command line of the application.
 */
static int
us_vhost_parse_args(int argc, char **argv)
{
	int opt, ret;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"vm2vm", required_argument, NULL, 0},
		{"stats", required_argument, NULL, 0},
		{NULL, 0, 0, 0},
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:", long_option, &option_index)) != EOF) {
		switch (opt) {
		/* Portmask */
		case 'p':
			ret = parse_portmask(optarg);
			if (ret == -1) {
				RTE_LOG(INFO, CONFIG, "Invalid portmask\n");
				us_vhost_usage(prgname);
				return -1;
			}
			enabled_port_mask = ret;
			break;

		case 0:
			/* Enable/disable vm2vm comms. */
			if (!strncmp(long_option[option_index].name, "vm2vm", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, CONFIG, "Invalid argument for vm2vm [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				enable_vm2vm = ret;
			}

			/* Enable/disable stats. */
			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, CONFIG, "Invalid argument for stats [0..N]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				enable_stats = ret;
			}
			break;

		/* Invalid option - print options. */
		default:
			us_vhost_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint8_t)i;
	}

	if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	return 0;
}
/*
 * Update the global var NUM_PORTS and array PORTS according to system ports number
 * and return valid ports number
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		RTE_LOG(INFO, PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (ports[portid] >= nb_ports) {
			RTE_LOG(INFO, PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
				ports[portid], (nb_ports - 1));
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}
/*
 * Macro to print out packet contents. Wrapped in debug define so that the
 * data path is not affected when debug is disabled.
 */
#ifdef DEBUG
#define PRINT_PACKET(device, addr, size, header) do { \
	char *pkt_addr = (char *)(addr); \
	unsigned int index; \
	char packet[MAX_PRINT_BUFF]; \
	\
	if ((header)) \
		rte_snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Header size %d: ", (device->device_fh), (size)); \
	else \
		rte_snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Packet size %d: ", (device->device_fh), (size)); \
	for (index = 0; index < (size); index++) { \
		rte_snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), \
			"%02hhx ", pkt_addr[index]); \
	} \
	rte_snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), "\n"); \
	\
	LOG_DEBUG(DATA, "%s", packet); \
} while (0)
#else
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif
/*
 * Function to convert guest physical addresses to vhost virtual addresses. This
 * is used to convert virtio buffer addresses.
 */
static inline uint64_t __attribute__((always_inline))
gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
{
	struct virtio_memory_regions *region;
	uint32_t regionidx;
	uint64_t vhost_va = 0;

	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
		region = &dev->mem->regions[regionidx];
		if ((guest_pa >= region->guest_phys_address) &&
			(guest_pa <= region->guest_phys_address_end)) {
			vhost_va = region->address_offset + guest_pa;
			break;
		}
	}
	LOG_DEBUG(DATA, "(%"PRIu64") GPA %p| VVA %p\n",
		dev->device_fh, (void *)(uintptr_t)guest_pa, (void *)(uintptr_t)vhost_va);

	return vhost_va;
}
/*
 * This function adds buffers to the virtio devices RX virtqueue. Buffers can
 * be received from the physical port or from another virtio device. A packet
 * count is returned to indicate the number of packets that were successfully
 * added to the RX queue.
 */
static inline uint32_t __attribute__((always_inline))
virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
{
	struct vhost_virtqueue *vq;
	struct vring_desc *desc;
	struct rte_mbuf *buff;
	/* The virtio_hdr is initialised to 0. */
	struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
	uint64_t buff_addr = 0;
	uint64_t buff_hdr_addr = 0;
	uint32_t head[MAX_PKT_BURST], packet_len = 0;
	uint32_t head_idx, packet_success = 0;
	uint16_t avail_idx, res_cur_idx;
	uint16_t res_base_idx, res_end_idx;
	uint16_t free_entries;
	uint8_t success = 0;
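	/*
	 * The receive path works in three phases: (1) atomically reserve a
	 * window of available-ring slots, (2) copy each mbuf into the guest
	 * buffers of that window, and (3) publish the result by advancing the
	 * used ring in reservation order.
	 */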
	LOG_DEBUG(DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
	vq = dev->virtqueue_rx;
	count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
	/* As many data cores may want access to available buffers, they need to be reserved. */
	do {
		res_base_idx = vq->last_used_idx_res;

		avail_idx = *((volatile uint16_t *)&vq->avail->idx);

		free_entries = (avail_idx - res_base_idx);

		/* check that we have enough buffers */
		if (unlikely(count > free_entries))
			count = free_entries;

		if (count == 0)
			return 0;

		res_end_idx = res_base_idx + count;
		/* vq->last_used_idx_res is atomically updated. */
		success = rte_atomic16_cmpset(&vq->last_used_idx_res, res_base_idx,
				res_end_idx);
	} while (unlikely(success == 0));
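	/*
	 * The compare-and-set above only succeeds if no other core moved
	 * last_used_idx_res since it was read, so each core ends up owning a
	 * disjoint [res_base_idx, res_end_idx) window of ring slots.
	 */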
	res_cur_idx = res_base_idx;
	LOG_DEBUG(DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);

	/* Prefetch available ring to retrieve indexes. */
	rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);

	/* Retrieve all of the head indexes first to avoid caching issues. */
	for (head_idx = 0; head_idx < count; head_idx++)
		head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) & (vq->size - 1)];

	/* Prefetch descriptor index. */
	rte_prefetch0(&vq->desc[head[packet_success]]);

	while (res_cur_idx != res_end_idx) {
		/* Get descriptor from available ring */
		desc = &vq->desc[head[packet_success]];
		/* Prefetch descriptor address. */
		rte_prefetch0(desc);

		buff = pkts[packet_success];

		/* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */
		buff_addr = gpa_to_vva(dev, desc->addr);
		/* Prefetch buffer address. */
		rte_prefetch0((void *)(uintptr_t)buff_addr);

		/* Copy virtio_hdr to packet and increment buffer address */
		buff_hdr_addr = buff_addr;
		packet_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen;

		/*
		 * If the descriptors are chained the header and data are placed in
		 * separate buffers.
		 */
		if (desc->flags & VRING_DESC_F_NEXT) {
			desc->len = vq->vhost_hlen;
			desc = &vq->desc[desc->next];
			/* Buffer address translation. */
			buff_addr = gpa_to_vva(dev, desc->addr);
			desc->len = rte_pktmbuf_data_len(buff);
		} else {
			buff_addr += vq->vhost_hlen;
			desc->len = packet_len;
		}

		/* Update used ring with desc information */
		vq->used->ring[res_cur_idx & (vq->size - 1)].id = head[packet_success];
		vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;

		/* Copy mbuf data to buffer */
		rte_memcpy((void *)(uintptr_t)buff_addr, (const void *)buff->pkt.data, rte_pktmbuf_data_len(buff));

		res_cur_idx++;
		packet_success++;

		/* If mergeable is disabled then a header is required per buffer. */
		rte_memcpy((void *)(uintptr_t)buff_hdr_addr, (const void *)&virtio_hdr, vq->vhost_hlen);
		if (res_cur_idx < res_end_idx) {
			/* Prefetch descriptor index. */
			rte_prefetch0(&vq->desc[head[packet_success]]);
		}
	}
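	/*
	 * Publish phase: the compiler barrier below makes the buffer copies
	 * visible before used->idx is advanced, and each core waits for
	 * earlier reservations to complete so used-ring updates are published
	 * in reservation order.
	 */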
	rte_compiler_barrier();

	/* Wait until it's our turn to add our buffer to the used ring. */
	while (unlikely(vq->last_used_idx != res_base_idx))
		rte_pause();

	*(volatile uint16_t *)&vq->used->idx += count;
	vq->last_used_idx = res_end_idx;

	return count;
}
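/*
 * The MAC comparison below loads each 6-byte address as a 64-bit word and
 * masks off the two excess bytes with MAC_ADDR_CMP (the low 48 bits), so a
 * single XOR replaces a byte-by-byte memcmp.
 */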
/*
 * Compares a packet destination MAC address to a device MAC address.
 */
static inline int __attribute__((always_inline))
ether_addr_cmp(struct ether_addr *ea, struct ether_addr *eb)
{
	return (((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0);
}
/*
 * This function registers a MAC address along with a
 * vlan tag to a VMDQ pool.
 */
static int
link_vmdq(struct virtio_net *dev)
{
	int ret;
	struct virtio_net_data_ll *dev_ll;

	dev_ll = ll_root_used;

	while (dev_ll != NULL) {
		if ((dev != dev_ll->dev) && ether_addr_cmp(&dev->mac_address, &dev_ll->dev->mac_address)) {
			RTE_LOG(INFO, DATA, "(%"PRIu64") WARNING: This device is using an existing MAC address and has not been registered.\n", dev->device_fh);
			return -1;
		}
		dev_ll = dev_ll->next;
	}

	/* vlan_tag currently uses the device_id. */
	dev->vlan_tag = vlan_tags[dev->device_fh];
	dev->vmdq_rx_q = dev->device_fh * (num_queues / num_devices);

	/* Print out VMDQ registration info. */
	RTE_LOG(INFO, DATA, "(%"PRIu64") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\n",
		dev->device_fh,
		dev->mac_address.addr_bytes[0], dev->mac_address.addr_bytes[1],
		dev->mac_address.addr_bytes[2], dev->mac_address.addr_bytes[3],
		dev->mac_address.addr_bytes[4], dev->mac_address.addr_bytes[5],
		dev->vlan_tag);

	/* Register the MAC address. */
	ret = rte_eth_dev_mac_addr_add(ports[0], &dev->mac_address, (uint32_t)dev->device_fh);
	if (ret) {
		RTE_LOG(ERR, DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",
			dev->device_fh);
		return -1;
	}

	/* Enable stripping of the vlan tag as we handle routing. */
	rte_eth_dev_set_vlan_strip_on_queue(ports[0], dev->vmdq_rx_q, 1);

	rte_compiler_barrier();
	/* Set device as ready for RX. */
	dev->ready = DEVICE_READY;

	return 0;
}
/*
 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
 * queue before disabling RX on the device.
 */
static inline void
unlink_vmdq(struct virtio_net *dev)
{
	unsigned i = 0;
	unsigned rx_count;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	if (dev->ready == DEVICE_READY) {
		/* clear MAC and VLAN settings */
		rte_eth_dev_mac_addr_remove(ports[0], &dev->mac_address);
		for (i = 0; i < 6; i++)
			dev->mac_address.addr_bytes[i] = 0;

		dev->vlan_tag = 0;

		/* Clear out the receive buffers */
		rx_count = rte_eth_rx_burst(ports[0],
			(uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

		while (rx_count) {
			for (i = 0; i < rx_count; i++)
				rte_pktmbuf_free(pkts_burst[i]);

			rx_count = rte_eth_rx_burst(ports[0],
				(uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
		}

		dev->ready = DEVICE_NOT_READY;
	}
}
/*
 * Check if the packet destination MAC address is for a local device. If so then put
 * the packet on that device's RX queue. If not then return.
 */
static inline unsigned __attribute__((always_inline))
virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
{
	struct virtio_net_data_ll *dev_ll;
	struct ether_hdr *pkt_hdr;
	uint64_t ret = 0;

	pkt_hdr = (struct ether_hdr *)m->pkt.data;

	/* get the used devices list */
	dev_ll = ll_root_used;

	while (dev_ll != NULL) {
		if (likely(dev_ll->dev->ready == DEVICE_READY) && ether_addr_cmp(&(pkt_hdr->d_addr),
				&dev_ll->dev->mac_address)) {

			/* Drop the packet if the TX packet is destined for the TX device. */
			if (dev_ll->dev->device_fh == dev->device_fh) {
				LOG_DEBUG(DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n",
					dev_ll->dev->device_fh);
				return 0;
			}

			LOG_DEBUG(DATA, "(%"PRIu64") TX: MAC address is local\n", dev_ll->dev->device_fh);

			if (dev_ll->dev->remove) {
				/* drop the packet if the device is marked for removal */
				LOG_DEBUG(DATA, "(%"PRIu64") Device is marked for removal\n", dev_ll->dev->device_fh);
			} else {
				/* send the packet to the local virtio device */
				ret = virtio_dev_rx(dev_ll->dev, &m, 1);
				if (enable_stats) {
					rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx_total, 1);
					rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx, ret);
					dev_statistics[dev->device_fh].tx_total++;
					dev_statistics[dev->device_fh].tx += ret;
				}
			}

			return 0;
		}
		dev_ll = dev_ll->next;
	}

	return -1;
}
/*
 * This function routes the TX packet to the correct interface. This may be a local device
 * or the physical port.
 */
static inline void __attribute__((always_inline))
virtio_tx_route(struct virtio_net *dev, struct rte_mbuf *m, struct rte_mempool *mbuf_pool, uint16_t vlan_tag)
{
	struct mbuf_table *tx_q;
	struct vlan_ethhdr *vlan_hdr;
	struct rte_mbuf **m_table;
	struct rte_mbuf *mbuf;
	unsigned len, ret;
	const uint16_t lcore_id = rte_lcore_id();

	/* check if destination is local VM */
	if (enable_vm2vm && (virtio_tx_local(dev, m) == 0)) {
		return;
	}

	LOG_DEBUG(DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh);

	/* Add packet to the port tx queue */
	tx_q = &lcore_tx_queue[lcore_id];
	len = tx_q->len;

	/* Allocate an mbuf and populate the structure. */
	mbuf = rte_pktmbuf_alloc(mbuf_pool);
	if (unlikely(mbuf == NULL))
		return;

	mbuf->pkt.data_len = m->pkt.data_len + VLAN_HLEN;
	mbuf->pkt.pkt_len = mbuf->pkt.data_len;

	/* Copy ethernet header to mbuf. */
	rte_memcpy((void *)mbuf->pkt.data, (const void *)m->pkt.data, ETH_HLEN);

	/* Setup vlan header. Bytes need to be re-ordered for network with htons() */
	vlan_hdr = (struct vlan_ethhdr *)mbuf->pkt.data;
	vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;
	vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
	vlan_hdr->h_vlan_TCI = htons(vlan_tag);
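	/*
	 * After the three stores above, the first 18 bytes of the new mbuf
	 * hold dst MAC, src MAC, 0x8100, the 16-bit tag and the original
	 * EtherType; the payload is therefore shifted 4 bytes relative to the
	 * source frame.
	 */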
	/* Copy the remaining packet contents to the mbuf. */
	rte_memcpy((void *)((uint8_t *)mbuf->pkt.data + VLAN_ETH_HLEN),
		(const void *)((uint8_t *)m->pkt.data + ETH_HLEN), (m->pkt.data_len - ETH_HLEN));
	tx_q->m_table[len] = mbuf;
	len++;
	if (enable_stats) {
		dev_statistics[dev->device_fh].tx_total++;
		dev_statistics[dev->device_fh].tx++;
	}

	if (unlikely(len == MAX_PKT_BURST)) {
		m_table = (struct rte_mbuf **)tx_q->m_table;
		ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id, m_table, (uint16_t)len);
		/* Free any buffers not handled by TX and update the port stats. */
		if (unlikely(ret < len)) {
			do {
				rte_pktmbuf_free(m_table[ret]);
			} while (++ret < len);
		}

		len = 0;
	}

	tx_q->len = len;
	return;
}
/* Dequeue packets from a guest TX virtqueue and route each one. */
static inline void __attribute__((always_inline))
virtio_dev_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
{
	struct rte_mbuf m;
	struct vhost_virtqueue *vq;
	struct vring_desc *desc;
	uint64_t buff_addr = 0;
	uint32_t head[MAX_PKT_BURST];
	uint32_t used_idx;
	uint32_t i;
	uint16_t free_entries, packet_success = 0;
	uint16_t avail_idx;

	vq = dev->virtqueue_tx;
	avail_idx = *((volatile uint16_t *)&vq->avail->idx);

	/* If there are no available buffers then return. */
	if (vq->last_used_idx == avail_idx)
		return;

	LOG_DEBUG(DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);

	/* Prefetch available ring to retrieve head indexes. */
	rte_prefetch0(&vq->avail->ring[vq->last_used_idx & (vq->size - 1)]);

	/* get the number of free entries in the ring */
	free_entries = avail_idx - vq->last_used_idx;
	free_entries = unlikely(free_entries < MAX_PKT_BURST) ? free_entries : MAX_PKT_BURST;

	LOG_DEBUG(DATA, "(%"PRIu64") Buffers available %d\n", dev->device_fh, free_entries);
	/* Retrieve all of the head indexes first to avoid caching issues. */
	for (i = 0; i < free_entries; i++)
		head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];
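	/*
	 * head[] snapshots the avail ring up front. vq->size is a power of
	 * two (the virtio spec requires it), so "idx & (vq->size - 1)" is a
	 * cheap modulo for the ring wrap-around.
	 */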
	/* Prefetch descriptor index. */
	rte_prefetch0(&vq->desc[head[packet_success]]);

	while (packet_success < free_entries) {
		desc = &vq->desc[head[packet_success]];
		/* Prefetch descriptor address. */
		rte_prefetch0(desc);

		if (packet_success < (free_entries - 1)) {
			/* Prefetch descriptor index. */
			rte_prefetch0(&vq->desc[head[packet_success + 1]]);
		}

		/* Update used index buffer information. */
		used_idx = vq->last_used_idx & (vq->size - 1);
		vq->used->ring[used_idx].id = head[packet_success];
		vq->used->ring[used_idx].len = 0;

		/* Discard first buffer as it is the virtio header */
		desc = &vq->desc[desc->next];

		/* Buffer address translation. */
		buff_addr = gpa_to_vva(dev, desc->addr);
		/* Prefetch buffer address. */
		rte_prefetch0((void *)(uintptr_t)buff_addr);

		/* Setup dummy mbuf. This is copied to a real mbuf if transmitted out the physical port. */
		m.pkt.data_len = desc->len;
		m.pkt.data = (void *)(uintptr_t)buff_addr;

		PRINT_PACKET(dev, (uintptr_t)buff_addr, desc->len, 0);

		virtio_tx_route(dev, &m, mbuf_pool, 0);

		vq->last_used_idx++;
		packet_success++;
	}

	rte_compiler_barrier();
	vq->used->idx += packet_success;
	/* Kick guest if required. */
	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
		eventfd_write((int)vq->kickfd, 1);
}
/*
 * This function is called by each data core. It handles all RX/TX registered with the
 * core. For TX the specific lcore linked list is used. For RX, MAC addresses are compared
 * with all devices in the main linked list.
 */
static int
switch_worker(void *arg)
{
	struct rte_mempool *mbuf_pool = arg;
	struct virtio_net *dev = NULL;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct virtio_net_data_ll *dev_ll;
	struct mbuf_table *tx_q;
	volatile struct lcore_ll_info *lcore_ll;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
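	/*
	 * drain_tsc converts BURST_TX_DRAIN_US into TSC cycles: cycles per
	 * microsecond (rounded up) multiplied by the drain interval, so a
	 * partially filled TX queue is flushed at least every ~100us.
	 */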
	uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
	unsigned ret, i;
	const uint16_t lcore_id = rte_lcore_id();
	const uint16_t num_cores = (uint16_t)rte_lcore_count();
	uint16_t rx_count = 0;

	RTE_LOG(INFO, DATA, "Processing on Core %u started\n", lcore_id);
	lcore_ll = lcore_info[lcore_id].lcore_ll;
	prev_tsc = 0;

	tx_q = &lcore_tx_queue[lcore_id];
	for (i = 0; i < num_cores; i++) {
		if (lcore_ids[i] == lcore_id) {
			tx_q->txq_id = i;
			break;
		}
	}

	while (1) {
		cur_tsc = rte_rdtsc();
		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			if (tx_q->len) {
				LOG_DEBUG(DATA, "TX queue drained after timeout with burst size %u\n", tx_q->len);

				/* Tx any packets in the queue */
				ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,
						(struct rte_mbuf **)tx_q->m_table,
						(uint16_t)tx_q->len);
				if (unlikely(ret < tx_q->len)) {
					do {
						rte_pktmbuf_free(tx_q->m_table[ret]);
					} while (++ret < tx_q->len);
				}

				tx_q->len = 0;
			}

			prev_tsc = cur_tsc;
		}

		/*
		 * Inform the configuration core that we have exited the linked list and that no devices are
		 * in use if requested.
		 */
		if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
			lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
		dev_ll = lcore_ll->ll_root_used;

		while (dev_ll != NULL) {
			/* get virtio device ID */
			dev = dev_ll->dev;

			if (unlikely(dev->remove)) {
				dev_ll = dev_ll->next;
				unlink_vmdq(dev);
				dev->ready = DEVICE_SAFE_REMOVE;
				continue;
			}
			if (likely(dev->ready == DEVICE_READY)) {
				/* Handle guest RX */
				rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

				if (rx_count) {
					ret_count = virtio_dev_rx(dev, pkts_burst, rx_count);
					if (enable_stats) {
						rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx_total, rx_count);
						rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx, ret_count);
					}
					while (likely(rx_count)) {
						rx_count--;
						rte_pktmbuf_free_seg(pkts_burst[rx_count]);
					}
				}
			}

			if (likely(!dev->remove))
				/* Handle guest TX */
				virtio_dev_tx(dev, mbuf_pool);

			/* move to the next device in the list */
			dev_ll = dev_ll->next;
		}
	}

	return 0;
}
/*
 * Add an entry to a used linked list. A free entry must first be found in the free linked list
 * using get_data_ll_free_entry();
 */
static void
add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev)
{
	struct virtio_net_data_ll *ll = *ll_root_addr;

	/* Set next as NULL and use a compiler barrier to avoid reordering. */
	ll_dev->next = NULL;
	rte_compiler_barrier();

	/* If ll == NULL then this is the first device. */
	if (ll) {
		/* Increment to the tail of the linked list. */
		while (ll->next != NULL)
			ll = ll->next;

		ll->next = ll_dev;
	} else {
		*ll_root_addr = ll_dev;
	}
}
/*
 * Remove an entry from a used linked list. The entry must then be added to the free linked list
 * using put_data_ll_free_entry().
 */
static void
rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev, struct virtio_net_data_ll *ll_dev_last)
{
	struct virtio_net_data_ll *ll = *ll_root_addr;

	if (ll_dev == ll)
		*ll_root_addr = ll_dev->next;
	else
		ll_dev_last->next = ll_dev->next;
}
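/*
 * The caller tracks ll_dev_last while walking the list, making removal O(1);
 * ll_dev_last is only allowed to be NULL when ll_dev is the list head.
 */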
/*
 * Find and return an entry from the free linked list.
 */
static struct virtio_net_data_ll *
get_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr)
{
	struct virtio_net_data_ll *ll_free = *ll_root_addr;
	struct virtio_net_data_ll *ll_dev;

	if (ll_free == NULL)
		return NULL;

	ll_dev = ll_free;
	*ll_root_addr = ll_free->next;

	return ll_dev;
}
/*
 * Place an entry back on to the free linked list.
 */
static void
put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev)
{
	struct virtio_net_data_ll *ll_free = *ll_root_addr;

	ll_dev->next = ll_free;
	*ll_root_addr = ll_dev;
}
/*
 * Creates a linked list of a given size.
 */
static struct virtio_net_data_ll *
alloc_data_ll(uint32_t size)
{
	struct virtio_net_data_ll *ll_new;
	uint32_t i;

	/* Malloc and then chain the linked list. */
	ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
	if (ll_new == NULL) {
		RTE_LOG(ERR, CONFIG, "Failed to allocate memory for ll_new.\n");
		return NULL;
	}

	for (i = 0; i < size - 1; i++) {
		ll_new[i].dev = NULL;
		ll_new[i].next = &ll_new[i + 1];
	}
	ll_new[i].dev = NULL;
	ll_new[i].next = NULL;

	return ll_new;
}
/*
 * Create the main linked list along with each individual core's linked list. A used and a free list
 * are created to manage entries.
 */
static int
init_data_ll(void)
{
	int lcore;

	RTE_LCORE_FOREACH_SLAVE(lcore) {
		lcore_info[lcore].lcore_ll = malloc(sizeof(struct lcore_ll_info));
		if (lcore_info[lcore].lcore_ll == NULL) {
			RTE_LOG(ERR, CONFIG, "Failed to allocate memory for lcore_ll.\n");
			return -1;
		}

		lcore_info[lcore].lcore_ll->device_num = 0;
		lcore_info[lcore].lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
		lcore_info[lcore].lcore_ll->ll_root_used = NULL;
		if (num_devices % num_switching_cores)
			lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll((num_devices / num_switching_cores) + 1);
		else
			lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll(num_devices / num_switching_cores);
	}

	/* Allocate devices up to a maximum of MAX_DEVICES. */
	ll_root_free = alloc_data_ll(MIN((num_devices), MAX_DEVICES));

	return 0;
}
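/*
 * MIN() comes from <sys/param.h>. Each worker core is given an equal share
 * of the free entries, rounded up when the devices do not split evenly.
 */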
/*
 * Remove a device from the specific data core linked list and from the main linked list. The
 * rx/tx thread must set the flag to indicate that it is safe to remove the device.
 */
static void
destroy_device(volatile struct virtio_net *dev)
{
	struct virtio_net_data_ll *ll_lcore_dev_cur;
	struct virtio_net_data_ll *ll_main_dev_cur;
	struct virtio_net_data_ll *ll_lcore_dev_last = NULL;
	struct virtio_net_data_ll *ll_main_dev_last = NULL;
	int lcore;

	dev->flags &= ~VIRTIO_DEV_RUNNING;

	/* set the remove flag. */
	dev->remove = 1;

	while (dev->ready != DEVICE_SAFE_REMOVE) {
		rte_pause();
	}

	/* Search for entry to be removed from lcore ll */
	ll_lcore_dev_cur = lcore_info[dev->coreid].lcore_ll->ll_root_used;
	while (ll_lcore_dev_cur != NULL) {
		if (ll_lcore_dev_cur->dev == dev) {
			break;
		} else {
			ll_lcore_dev_last = ll_lcore_dev_cur;
			ll_lcore_dev_cur = ll_lcore_dev_cur->next;
		}
	}

	/* Search for entry to be removed from main ll */
	ll_main_dev_cur = ll_root_used;
	ll_main_dev_last = NULL;
	while (ll_main_dev_cur != NULL) {
		if (ll_main_dev_cur->dev == dev) {
			break;
		} else {
			ll_main_dev_last = ll_main_dev_cur;
			ll_main_dev_cur = ll_main_dev_cur->next;
		}
	}

	if (ll_lcore_dev_cur == NULL || ll_main_dev_cur == NULL) {
		RTE_LOG(ERR, XENHOST, "%s: could not find device in per_cpu list or main_list\n", __func__);
		return;
	}

	/* Remove entries from the lcore and main ll. */
	rm_data_ll_entry(&lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->ll_root_used, ll_lcore_dev_cur, ll_lcore_dev_last);
	rm_data_ll_entry(&ll_root_used, ll_main_dev_cur, ll_main_dev_last);

	/* Set the dev_removal_flag on each lcore. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		lcore_info[lcore].lcore_ll->dev_removal_flag = REQUEST_DEV_REMOVAL;
	}

	/*
	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL we can be sure that
	 * they can no longer access the device removed from the linked lists and that the devices
	 * are no longer in use.
	 */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		while (lcore_info[lcore].lcore_ll->dev_removal_flag != ACK_DEV_REMOVAL) {
			rte_pause();
		}
	}

	/* Add the entries back to the lcore and main free ll. */
	put_data_ll_free_entry(&lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->ll_root_free, ll_lcore_dev_cur);
	put_data_ll_free_entry(&ll_root_free, ll_main_dev_cur);

	/* Decrement number of devices on the lcore. */
	lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->device_num--;

	RTE_LOG(INFO, DATA, " #####(%"PRIu64") Device has been removed from data core\n", dev->device_fh);
}
/*
 * A new device is added to a data core. First the device is added to the main linked list
 * and then allocated to a specific data core.
 */
static int
new_device(struct virtio_net *dev)
{
	struct virtio_net_data_ll *ll_dev;
	int lcore, core_add = 0;
	uint32_t device_num_min = num_devices;

	/* Add device to main ll */
	ll_dev = get_data_ll_free_entry(&ll_root_free);
	if (ll_dev == NULL) {
		RTE_LOG(INFO, DATA, "(%"PRIu64") No free entry found in linked list. Device limit "
			"of %d devices per core has been reached\n",
			dev->device_fh, num_devices);
		return -1;
	}
	ll_dev->dev = dev;
	add_data_ll_entry(&ll_root_used, ll_dev);

	/* reset ready flag */
	dev->ready = DEVICE_NOT_READY;
	dev->remove = 0;

	/* Find a suitable lcore to add the device. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
			device_num_min = lcore_info[lcore].lcore_ll->device_num;
			core_add = lcore;
		}
	}

	/* Add device to lcore ll */
	ll_dev->dev->coreid = core_add;
	ll_dev = get_data_ll_free_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_free);
	if (ll_dev == NULL) {
		RTE_LOG(INFO, DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh);
		destroy_device(dev);
		return -1;
	}
	ll_dev->dev = dev;
	add_data_ll_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_used, ll_dev);

	/* Initialize device stats */
	memset(&dev_statistics[dev->device_fh], 0, sizeof(struct device_statistics));

	lcore_info[ll_dev->dev->coreid].lcore_ll->device_num++;
	dev->flags |= VIRTIO_DEV_RUNNING;

	RTE_LOG(INFO, DATA, "(%"PRIu64") Device has been added to data core %d\n", dev->device_fh, dev->coreid);

	return 0;
}
/*
 * These callbacks allow devices to be added to the data core when configuration
 * has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops = {
	.new_device = new_device,
	.destroy_device = destroy_device,
};
/*
 * This is a thread that will wake up after a period to print stats if the
 * user has enabled them.
 */
static void
print_stats(void)
{
	struct virtio_net_data_ll *dev_ll;
	uint64_t tx_dropped, rx_dropped;
	uint64_t tx, tx_total, rx, rx_total;
	uint32_t device_fh;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	while (1) {
		sleep(enable_stats);

		/* Clear screen and move to top left */
		printf("%s%s", clr, top_left);

		printf("\nDevice statistics ====================================");

		dev_ll = ll_root_used;
		while (dev_ll != NULL) {
			device_fh = (uint32_t)dev_ll->dev->device_fh;
			tx_total = dev_statistics[device_fh].tx_total;
			tx = dev_statistics[device_fh].tx;
			tx_dropped = tx_total - tx;
			rx_total = rte_atomic64_read(&dev_statistics[device_fh].rx_total);
			rx = rte_atomic64_read(&dev_statistics[device_fh].rx);
			rx_dropped = rx_total - rx;

			printf("\nStatistics for device %"PRIu32" ------------------------------"
					"\nTX total:		%"PRIu64""
					"\nTX dropped:		%"PRIu64""
					"\nTX successful:	%"PRIu64""
					"\nRX total:		%"PRIu64""
					"\nRX dropped:		%"PRIu64""
					"\nRX successful:	%"PRIu64"",
					device_fh,
					tx_total,
					tx_dropped,
					tx,
					rx_total,
					rx_dropped,
					rx);

			dev_ll = dev_ll->next;
		}
		printf("\n======================================================\n");
	}
}
int init_virtio_net(struct virtio_net_device_ops const * const ops);

/*
 * Main function, does initialisation and calls the per-lcore functions. The
 * Xen vhost control interface is also registered here to monitor guests.
 */
int
MAIN(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool;
	unsigned lcore_id, core_id = 0;
	unsigned nb_ports, valid_num_ports;
	int ret;
	uint8_t portid;
	static pthread_t tid;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = us_vhost_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid argument\n");

	if (rte_pmd_init_all() != 0 || rte_eal_pci_probe() != 0)
		rte_exit(EXIT_FAILURE, "Error with NIC driver initialization\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;

	if (rte_lcore_count() > RTE_MAX_LCORE)
		rte_exit(EXIT_FAILURE, "Not enough cores\n");

	/* set the number of switching cores available */
	num_switching_cores = rte_lcore_count() - 1;

	/* Get the number of physical ports. */
	nb_ports = rte_eth_dev_count();
	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	/*
	 * Update the global var NUM_PORTS and global array PORTS
	 * and get value of var VALID_NUM_PORTS according to system ports number
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	/* Create the mbuf pool. */
	mbuf_pool = rte_mempool_create("MBUF_POOL", NUM_MBUFS_PER_PORT * valid_num_ports,
			MBUF_SIZE, MBUF_CACHE_SIZE,
			sizeof(struct rte_pktmbuf_pool_private),
			rte_pktmbuf_pool_init, NULL,
			rte_pktmbuf_init, NULL,
			rte_socket_id(), 0);
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
	/* Set log level. */
	rte_set_log_level(LOG_LEVEL);

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			RTE_LOG(INFO, PORT, "Skipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
	}

	/* Initialise all linked lists. */
	if (init_data_ll() == -1)
		rte_exit(EXIT_FAILURE, "Failed to initialize linked list\n");

	/* Initialize device stats */
	memset(&dev_statistics, 0, sizeof(dev_statistics));

	/* Enable stats if the user option is set. */
	if (enable_stats)
		pthread_create(&tid, NULL, (void *)print_stats, NULL);

	/* Launch all data cores. */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		rte_eal_remote_launch(switch_worker, mbuf_pool, lcore_id);
	}

	init_virtio_xen(&virtio_net_device_ops);

	virtio_monitor_loop();

	return 0;
}