X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fvhost_xen%2Fmain.c;h=3fcc138d274d3bc1234ff2e088622685edc6e6b5;hb=67b6d3039e9edbc4624c878c6930be5e126e8b58;hp=2ec44ee8f92dfcf4f33a839c87c0604176396152;hpb=e5ffdd1457c0fb4e8365f524ee2529ac726edcf3;p=dpdk.git diff --git a/examples/vhost_xen/main.c b/examples/vhost_xen/main.c index 2ec44ee8f9..3fcc138d27 100644 --- a/examples/vhost_xen/main.c +++ b/examples/vhost_xen/main.c @@ -1,13 +1,13 @@ /*- * BSD LICENSE - * + * * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: - * + * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright @@ -17,7 +17,7 @@ * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. - * + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR @@ -61,13 +61,12 @@ /* * Calculate the number of buffers needed per port */ -#define NUM_MBUFS_PER_PORT ((MAX_QUEUES*RTE_TEST_RX_DESC_DEFAULT) + \ +#define NUM_MBUFS_PER_PORT ((MAX_QUEUES*RTE_TEST_RX_DESC_DEFAULT) + \ (num_switching_cores*MAX_PKT_BURST) + \ (num_switching_cores*RTE_TEST_TX_DESC_DEFAULT) +\ (num_switching_cores*MBUF_CACHE_SIZE)) #define MBUF_CACHE_SIZE 64 -#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) /* * RX and TX Prefetch, Host, and Write-back threshold values should be @@ -88,9 +87,9 @@ #define TX_HTHRESH 0 /* Default values of TX host threshold reg. */ #define TX_WTHRESH 0 /* Default values of TX write-back threshold reg. */ -#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */ -#define MAX_MRG_PKT_BURST 16 /* Max burst for merge buffers. Set to 1 due to performance issue. */ -#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ +#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */ +#define MAX_MRG_PKT_BURST 16 /* Max burst for merge buffers. Set to 1 due to performance issue. */ +#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ /* State of virtio device. */ #define DEVICE_NOT_READY 0 @@ -110,7 +109,7 @@ /* Max number of devices. Limited by vmdq. */ #define MAX_DEVICES 64 -/* Size of buffers used for rte_snprintfs. */ +/* Size of buffers used for snprintfs. */ #define MAX_PRINT_BUFF 6072 @@ -135,31 +134,6 @@ static uint32_t enable_vm2vm = 1; /* Enable stats. */ static uint32_t enable_stats = 0; -/* Default configuration for rx and tx thresholds etc. */ -static const struct rte_eth_rxconf rx_conf_default = { - .rx_thresh = { - .pthresh = RX_PTHRESH, - .hthresh = RX_HTHRESH, - .wthresh = RX_WTHRESH, - }, - .rx_drop_en = 1, -}; - -/* - * These default values are optimized for use with the Intel(R) 82599 10 GbE - * Controller and the DPDK ixgbe/igb PMD. Consider using other values for other - * network controllers and/or network drivers. 
- */
-static const struct rte_eth_txconf tx_conf_default = {
-	.tx_thresh = {
-		.pthresh = TX_PTHRESH,
-		.hthresh = TX_HTHRESH,
-		.wthresh = TX_WTHRESH,
-	},
-	.tx_free_thresh = 0, /* Use PMD default values */
-	.tx_rs_thresh = 0, /* Use PMD default values */
-};
-
 /* empty vmdq configuration structure. Filled in programatically */
 static const struct rte_eth_conf vmdq_conf_default = {
 	.rxmode = {
@@ -287,7 +261,7 @@ static inline int
 validate_num_devices(uint32_t max_nb_devices)
 {
 	if (num_devices > max_nb_devices) {
-		RTE_LOG(ERR, PORT, "invalid number of devices\n");
+		RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
 		return -1;
 	}
 	return 0;
@@ -301,6 +275,7 @@ static inline int
 port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 {
 	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rxconf *rxconf;
 	struct rte_eth_conf port_conf;
 	uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
 	const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT, tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
@@ -331,17 +306,21 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 	if (retval != 0)
 		return retval;
 
+	rte_eth_dev_info_get(port, &dev_info);
+	rxconf = &dev_info.default_rxconf;
+	rxconf->rx_drop_en = 1;
 	/* Setup the queues. */
 	for (q = 0; q < rx_rings; q ++) {
 		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
-						rte_eth_dev_socket_id(port), &rx_conf_default,
+						rte_eth_dev_socket_id(port), rxconf,
 						mbuf_pool);
 		if (retval < 0)
 			return retval;
 	}
 	for (q = 0; q < tx_rings; q ++) {
 		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
-						rte_eth_dev_socket_id(port), &tx_conf_default);
+						rte_eth_dev_socket_id(port),
+						NULL);
 		if (retval < 0)
 			return retval;
 	}
@@ -352,8 +331,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 
 	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
-	RTE_LOG(INFO, PORT, "Max virtio devices supported: %u\n", num_devices);
-	RTE_LOG(INFO, PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
+	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
+	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
 			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
 			(unsigned)port,
 			vmdq_ports_eth_addr[port].addr_bytes[0],
@@ -418,7 +397,7 @@ parse_num_opt(const char *q_arg, uint32_t max_valid_value)
 static void
 us_vhost_usage(const char *prgname)
 {
-	RTE_LOG(INFO, CONFIG, "%s [EAL options] -- -p PORTMASK --vm2vm [0|1] --stats [0-N] --nb-devices ND\n"
+	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK --vm2vm [0|1] --stats [0-N] --nb-devices ND\n"
 	"		-p PORTMASK: Set mask for ports to be used by application\n"
 	"		--vm2vm [0|1]: disable/enable(default) vm2vm comms\n"
 	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n",
@@ -448,7 +427,7 @@ us_vhost_parse_args(int argc, char **argv)
 		case 'p':
 			enabled_port_mask = parse_portmask(optarg);
 			if (enabled_port_mask == 0) {
-				RTE_LOG(INFO, CONFIG, "Invalid portmask\n");
+				RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
 				us_vhost_usage(prgname);
 				return -1;
 			}
@@ -459,7 +438,7 @@ us_vhost_parse_args(int argc, char **argv)
 			if (!strncmp(long_option[option_index].name, "vm2vm", MAX_LONG_OPT_SZ)) {
 				ret = parse_num_opt(optarg, 1);
 				if (ret == -1) {
-					RTE_LOG(INFO, CONFIG, "Invalid argument for vm2vm [0|1]\n");
+					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for vm2vm [0|1]\n");
 					us_vhost_usage(prgname);
 					return -1;
 				} else {
@@ -471,7 +450,7 @@ us_vhost_parse_args(int argc, char **argv)
 			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
 				ret = parse_num_opt(optarg, INT32_MAX);
 				if (ret == -1) {
-					RTE_LOG(INFO, CONFIG, "Invalid argument for stats [0..N]\n");
+					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
 					us_vhost_usage(prgname);
 					return -1;
 				} else {
@@ -493,7 +472,7 @@ us_vhost_parse_args(int argc, char **argv)
 	}
 
 	if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
-		RTE_LOG(INFO, PORT, "Current enabled port number is %u,"
+		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u,"
 			"but only %u port can be enabled\n",num_ports, MAX_SUP_PORTS);
 		return -1;
 	}
@@ -511,14 +490,14 @@ static unsigned check_ports_num(unsigned nb_ports)
 	unsigned portid;
 
 	if (num_ports > nb_ports) {
-		RTE_LOG(INFO, PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
+		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
 			num_ports, nb_ports);
 		num_ports = nb_ports;
 	}
 
 	for (portid = 0; portid < num_ports; portid ++) {
 		if (ports[portid] >= nb_ports) {
-			RTE_LOG(INFO, PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
+			RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
 				ports[portid], (nb_ports - 1));
 			ports[portid] = INVALID_PORT_ID;
 			valid_num_ports--;
@@ -538,16 +517,16 @@ static unsigned check_ports_num(unsigned nb_ports)
 	char packet[MAX_PRINT_BUFF]; \
 	\
 	if ((header)) \
-		rte_snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Header size %d: ", (device->device_fh), (size)); \
+		snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Header size %d: ", (device->device_fh), (size)); \
 	else \
-		rte_snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Packet size %d: ", (device->device_fh), (size)); \
+		snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Packet size %d: ", (device->device_fh), (size)); \
 	for (index = 0; index < (size); index++) { \
-		rte_snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), \
+		snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), \
 			"%02hhx ", pkt_addr[index]); \
 	} \
-	rte_snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), "\n"); \
+	snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), "\n"); \
 	\
-	LOG_DEBUG(DATA, "%s", packet); \
+	LOG_DEBUG(VHOST_DATA, "%s", packet); \
 } while(0)
 #else
 #define PRINT_PACKET(device, addr, size, header) do{} while(0)
@@ -572,7 +551,7 @@ gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
 			break;
 		}
 	}
-	LOG_DEBUG(DATA, "(%"PRIu64") GPA %p| VVA %p\n",
+	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") GPA %p| VVA %p\n",
 		dev->device_fh, (void*)(uintptr_t)guest_pa, (void*)(uintptr_t)vhost_va);
 
 	return vhost_va;
@@ -601,7 +580,7 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
 	uint16_t free_entries;
 	uint8_t success = 0;
 
-	LOG_DEBUG(DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
+	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
 	vq = dev->virtqueue_rx;
 	count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
 	/* As many data cores may want access to available buffers, they need to be reserved. */
@@ -626,7 +605,7 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
 			res_end_idx);
 	} while (unlikely(success == 0));
 	res_cur_idx = res_base_idx;
-	LOG_DEBUG(DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);
+	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);
 
 	/* Prefetch available ring to retrieve indexes. */
 	rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
@@ -677,12 +656,12 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
 		vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;
 
 		/* Copy mbuf data to buffer */
-		rte_memcpy((void *)(uintptr_t)buff_addr, (const void*)buff->pkt.data, rte_pktmbuf_data_len(buff));
+		rte_memcpy((void *)(uintptr_t)buff_addr, (const void*)buff->data, rte_pktmbuf_data_len(buff));
 
 		res_cur_idx++;
 		packet_success++;
 
-		/* mergeable is disabled then a header is required per buffer. */	
+		/* mergeable is disabled then a header is required per buffer. */
 		rte_memcpy((void *)(uintptr_t)buff_hdr_addr, (const void*)&virtio_hdr, vq->vhost_hlen);
 		if (res_cur_idx < res_end_idx) {
 			/* Prefetch descriptor index. */
@@ -726,7 +705,7 @@ link_vmdq(struct virtio_net *dev)
 	while (dev_ll != NULL) {
 		if ((dev != dev_ll->dev) && ether_addr_cmp(&dev->mac_address, &dev_ll->dev->mac_address)) {
-			RTE_LOG(INFO, DATA, "(%"PRIu64") WARNING: This device is using an existing MAC address and has not been registered.\n", dev->device_fh);
+			RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") WARNING: This device is using an existing MAC address and has not been registered.\n", dev->device_fh);
 			return -1;
 		}
 		dev_ll = dev_ll->next;
@@ -737,7 +716,7 @@ link_vmdq(struct virtio_net *dev)
 	dev->vmdq_rx_q = dev->device_fh * (num_queues/num_devices);
 
 	/* Print out VMDQ registration info. */
-	RTE_LOG(INFO, DATA, "(%"PRIu64") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\n",
+	RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\n",
 		dev->device_fh,
 		dev->mac_address.addr_bytes[0], dev->mac_address.addr_bytes[1],
 		dev->mac_address.addr_bytes[2], dev->mac_address.addr_bytes[3],
@@ -746,8 +725,8 @@ link_vmdq(struct virtio_net *dev)
 
 	/* Register the MAC address. */
 	ret = rte_eth_dev_mac_addr_add(ports[0], &dev->mac_address, (uint32_t)dev->device_fh);
-	if (ret) {
-		RTE_LOG(ERR, DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",
+	if (ret) {
+		RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",
 			dev->device_fh);
 		return -1;
 	}
@@ -808,7 +787,7 @@ virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
 	struct ether_hdr *pkt_hdr;
 	uint64_t ret = 0;
 
-	pkt_hdr = (struct ether_hdr *)m->pkt.data;
+	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
 
 	/*get the used devices list*/
 	dev_ll = ll_root_used;
@@ -819,17 +798,17 @@ virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
 
 			/* Drop the packet if the TX packet is destined for the TX device. */
 			if (dev_ll->dev->device_fh == dev->device_fh) {
-				LOG_DEBUG(DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n",
+				LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n",
 					dev_ll->dev->device_fh);
 				return 0;
 			}
 
-			LOG_DEBUG(DATA, "(%"PRIu64") TX: MAC address is local\n", dev_ll->dev->device_fh);
+			LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is local\n", dev_ll->dev->device_fh);
 
 			if (dev_ll->dev->remove) {
 				/*drop the packet if the device is marked for removal*/
-				LOG_DEBUG(DATA, "(%"PRIu64") Device is marked for removal\n", dev_ll->dev->device_fh);
+				LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Device is marked for removal\n", dev_ll->dev->device_fh);
 			} else {
 				/*send the packet to the local virtio device*/
 				ret = virtio_dev_rx(dev_ll->dev, &m, 1);
@@ -868,7 +847,7 @@ virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *
 		return;
 	}
 
-	LOG_DEBUG(DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh);
+	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh);
 
 	/*Add packet to the port tx queue*/
 	tx_q = &lcore_tx_queue[lcore_id];
@@ -879,22 +858,24 @@ virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *
 	if(!mbuf)
 		return;
 
-	mbuf->pkt.data_len = m->pkt.data_len + VLAN_HLEN;
-	mbuf->pkt.pkt_len = mbuf->pkt.data_len;
+	mbuf->data_len = m->data_len + VLAN_HLEN;
+	mbuf->pkt_len = mbuf->data_len;
 
 	/* Copy ethernet header to mbuf. */
-	rte_memcpy((void*)mbuf->pkt.data, (const void*)m->pkt.data, ETH_HLEN);
+	rte_memcpy(rte_pktmbuf_mtod(mbuf, void*),
+		rte_pktmbuf_mtod(m, const void*), ETH_HLEN);
 
 	/* Setup vlan header. Bytes need to be re-ordered for network with htons()*/
-	vlan_hdr = (struct vlan_ethhdr *) mbuf->pkt.data;
+	vlan_hdr = rte_pktmbuf_mtod(mbuf, struct vlan_ethhdr *);
 	vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;
 	vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
 	vlan_hdr->h_vlan_TCI = htons(vlan_tag);
 
 	/* Copy the remaining packet contents to the mbuf. */
-	rte_memcpy((void*) ((uint8_t*)mbuf->pkt.data + VLAN_ETH_HLEN),
-		(const void*) ((uint8_t*)m->pkt.data + ETH_HLEN), (m->pkt.data_len - ETH_HLEN));
+	rte_memcpy(rte_pktmbuf_mtod_offset(mbuf, void *, VLAN_ETH_HLEN),
+		rte_pktmbuf_mtod_offset(m, const void *, ETH_HLEN),
+		(m->data_len - ETH_HLEN));
 	tx_q->m_table[len] = mbuf;
 	len++;
 	if (enable_stats) {
@@ -939,7 +920,7 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
 	if (vq->last_used_idx == avail_idx)
 		return;
 
-	LOG_DEBUG(DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);
+	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);
 
 	/* Prefetch available ring to retrieve head indexes. */
 	rte_prefetch0(&vq->avail->ring[vq->last_used_idx & (vq->size - 1)]);
@@ -948,7 +929,7 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
 	free_entries = avail_idx - vq->last_used_idx;
 	free_entries = unlikely(free_entries < MAX_PKT_BURST) ? free_entries : MAX_PKT_BURST;
 
-	LOG_DEBUG(DATA, "(%"PRIu64") Buffers available %d\n", dev->device_fh, free_entries);
+	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n", dev->device_fh, free_entries);
 	/* Retrieve all of the head indexes first to avoid caching issues. */
 	for (i = 0; i < free_entries; i++)
 		head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];
@@ -980,9 +961,9 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
 		rte_prefetch0((void*)(uintptr_t)buff_addr);
 
 		/* Setup dummy mbuf. This is copied to a real mbuf if transmitted out the physical port. */
-		m.pkt.data_len = desc->len;
-		m.pkt.data = (void*)(uintptr_t)buff_addr;
-		m.pkt.nb_segs = 1;
+		m.data_len = desc->len;
+		m.data_off = 0;
+		m.nb_segs = 1;
 
 		virtio_tx_route(dev, &m, mbuf_pool, 0);
 
@@ -999,7 +980,7 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
  * This function is called by each data core. It handles all RX/TX registered with the
  * core. For TX the specific lcore linked list is used. For RX, MAC addresses are compared
  * with all devices in the main linked list.
- */
+ */
 static int
 switch_worker(__attribute__((unused)) void *arg)
 {
@@ -1016,7 +997,7 @@ switch_worker(__attribute__((unused)) void *arg)
 	const uint16_t num_cores = (uint16_t)rte_lcore_count();
 	uint16_t rx_count = 0;
 
-	RTE_LOG(INFO, DATA, "Procesing on Core %u started \n", lcore_id);
+	RTE_LOG(INFO, VHOST_DATA, "Procesing on Core %u started \n", lcore_id);
 	lcore_ll = lcore_info[lcore_id].lcore_ll;
 	prev_tsc = 0;
 
@@ -1037,7 +1018,7 @@ switch_worker(__attribute__((unused)) void *arg)
 		if (unlikely(diff_tsc > drain_tsc)) {
 
 			if (tx_q->len) {
-				LOG_DEBUG(DATA, "TX queue drained after timeout with burst size %u \n", tx_q->len);
+				LOG_DEBUG(VHOST_DATA, "TX queue drained after timeout with burst size %u \n", tx_q->len);
 
 				/*Tx any packets in the queue*/
 				ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,
@@ -1056,11 +1037,11 @@ switch_worker(__attribute__((unused)) void *arg)
 		}
 
-		/* 
+		/*
 		 * Inform the configuration core that we have exited the linked list and that no devices are
 		 * in use if requested.
 		 */
-		if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL) 
+		if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
 			lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
 
 		/*
@@ -1191,7 +1172,7 @@ alloc_data_ll(uint32_t size)
 	/* Malloc and then chain the linked list. */
 	ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
 	if (ll_new == NULL) {
-		RTE_LOG(ERR, CONFIG, "Failed to allocate memory for ll_new.\n");
+		RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for ll_new.\n");
 		return NULL;
 	}
 
@@ -1216,7 +1197,7 @@ init_data_ll (void)
 	RTE_LCORE_FOREACH_SLAVE(lcore) {
 		lcore_info[lcore].lcore_ll = malloc(sizeof(struct lcore_ll_info));
 		if (lcore_info[lcore].lcore_ll == NULL) {
-			RTE_LOG(ERR, CONFIG, "Failed to allocate memory for lcore_ll.\n");
+			RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for lcore_ll.\n");
 			return -1;
 		}
 
@@ -1235,7 +1216,7 @@ init_data_ll (void)
 	return 0;
 }
 /*
- * Remove a device from the specific data core linked list and from the main linked list. The 
+ * Remove a device from the specific data core linked list and from the main linked list. The
 * rx/tx thread must be set the flag to indicate that it is safe to remove the device.
 * used.
 */
@@ -1293,8 +1274,8 @@ destroy_device (volatile struct virtio_net *dev)
 	RTE_LCORE_FOREACH_SLAVE(lcore) {
 		lcore_info[lcore].lcore_ll->dev_removal_flag = REQUEST_DEV_REMOVAL;
 	}
-	
-	/* 
+
+	/*
 	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL we can be sure that
 	 * they can no longer access the device removed from the linked lists and that the devices
 	 * are no longer in use.
@@ -1311,8 +1292,8 @@ destroy_device (volatile struct virtio_net *dev)
 
 	/* Decrement number of device on the lcore. */
 	lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->device_num--;
-	
-	RTE_LOG(INFO, DATA, " #####(%"PRIu64") Device has been removed from data core\n", dev->device_fh);
+
+	RTE_LOG(INFO, VHOST_DATA, " #####(%"PRIu64") Device has been removed from data core\n", dev->device_fh);
 }
 
 /*
@@ -1329,7 +1310,7 @@ new_device (struct virtio_net *dev)
 	/* Add device to main ll */
 	ll_dev = get_data_ll_free_entry(&ll_root_free);
 	if (ll_dev == NULL) {
-		RTE_LOG(INFO, DATA, "(%"PRIu64") No free entry found in linked list. Device limit "
+		RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") No free entry found in linked list. Device limit "
 			"of %d devices per core has been reached\n",
 			dev->device_fh, num_devices);
 		return -1;
@@ -1352,7 +1333,7 @@ new_device (struct virtio_net *dev)
 		ll_dev->dev->coreid = core_add;
 		ll_dev = get_data_ll_free_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_free);
 		if (ll_dev == NULL) {
-			RTE_LOG(INFO, DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh);
+			RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh);
 			destroy_device(dev);
 			return -1;
 		}
@@ -1365,7 +1346,7 @@ new_device (struct virtio_net *dev)
 	lcore_info[ll_dev->dev->coreid].lcore_ll->device_num++;
 	dev->flags |= VIRTIO_DEV_RUNNING;
 
-	RTE_LOG(INFO, DATA, "(%"PRIu64") Device has been added to data core %d\n", dev->device_fh, dev->coreid);
+	RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d\n", dev->device_fh, dev->coreid);
 
 	link_vmdq(dev);
 
@@ -1438,12 +1419,12 @@ print_stats(void)
 
 int init_virtio_net(struct virtio_net_device_ops const * const ops);
 
-/* 
+/*
 * Main function, does initialisation and calls the per-lcore functions. The CUSE
 * device is also registered here to handle the IOCTLs.
 */
 int
-MAIN(int argc, char *argv[])
+main(int argc, char *argv[])
 {
 	struct rte_mempool *mbuf_pool;
 	unsigned lcore_id, core_id = 0;
@@ -1451,6 +1432,7 @@ MAIN(int argc, char *argv[])
 	int ret;
 	uint8_t portid;
 	static pthread_t tid;
+	char thread_name[RTE_MAX_THREAD_NAME_LEN];
 
 	/* init EAL */
 	ret = rte_eal_init(argc, argv);
@@ -1464,9 +1446,6 @@ MAIN(int argc, char *argv[])
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "Invalid argument\n");
 
-	if (rte_eal_pci_probe() != 0)
-		rte_exit(EXIT_FAILURE, "Error with NIC driver initialization\n");
-
 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++)
 		if (rte_lcore_is_enabled(lcore_id))
 			lcore_ids[core_id ++] = lcore_id;
@@ -1483,24 +1462,21 @@ MAIN(int argc, char *argv[])
 		nb_ports = RTE_MAX_ETHPORTS;
 
 	/*
-	 * Update the global var NUM_PORTS and global array PORTS
-	 * and get value of var VALID_NUM_PORTS according to system ports number
-	 */
+	 * Update the global var NUM_PORTS and global array PORTS
+	 * and get value of var VALID_NUM_PORTS according to system ports number
+	 */
 	valid_num_ports = check_ports_num(nb_ports);
 
 	if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
-		RTE_LOG(INFO, PORT, "Current enabled port number is %u,"
+		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u,"
 			"but only %u port can be enabled\n",num_ports, MAX_SUP_PORTS);
 		return -1;
	}

 	/* Create the mbuf pool. */
-	mbuf_pool = rte_mempool_create("MBUF_POOL", NUM_MBUFS_PER_PORT * valid_num_ports,
-				       MBUF_SIZE, MBUF_CACHE_SIZE,
-				       sizeof(struct rte_pktmbuf_pool_private),
-				       rte_pktmbuf_pool_init, NULL,
-				       rte_pktmbuf_init, NULL,
-				       rte_socket_id(), 0);
+	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
+		NUM_MBUFS_PER_PORT * valid_num_ports, MBUF_CACHE_SIZE, 0,
+		RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 	if (mbuf_pool == NULL)
 		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
 
@@ -1511,7 +1487,7 @@ MAIN(int argc, char *argv[])
 	for (portid = 0; portid < nb_ports; portid++) {
 		/* skip ports that are not enabled */
 		if ((enabled_port_mask & (1 << portid)) == 0) {
-			RTE_LOG(INFO, PORT, "Skipping disabled port %d\n", portid);
+			RTE_LOG(INFO, VHOST_PORT, "Skipping disabled port %d\n", portid);
 			continue;
 		}
 		if (port_init(portid, mbuf_pool) != 0)
@@ -1526,8 +1502,19 @@ MAIN(int argc, char *argv[])
 	memset(&dev_statistics, 0, sizeof(dev_statistics));
 
 	/* Enable stats if the user option is set. */
-	if (enable_stats)
-		pthread_create(&tid, NULL, (void*)print_stats, NULL );
+	if (enable_stats) {
+		ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
+		if (ret != 0)
+			rte_exit(EXIT_FAILURE,
+				"Cannot create print-stats thread\n");
+
+		/* Set thread_name for aid in debugging. */
+		snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-xen-stats");
+		ret = pthread_setname_np(tid, thread_name);
+		if (ret != 0)
+			RTE_LOG(ERR, VHOST_CONFIG,
+				"Cannot set print-stats name\n");
+	}
 
 	/* Launch all data cores. */
 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {