X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fvhost_xen%2Fmain.c;h=2b04c959d86b8162094b54b5817078ff85dbb5f4;hb=b4e0f64ff271ccc78f6275b6bd49194e86fd4f4b;hp=8162cd8eea54514e5a9f409cc5f6021c64915533;hpb=ea672a8b1655bbb44876d2550ff56f384968a43b;p=dpdk.git diff --git a/examples/vhost_xen/main.c b/examples/vhost_xen/main.c index 8162cd8eea..2b04c959d8 100644 --- a/examples/vhost_xen/main.c +++ b/examples/vhost_xen/main.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -61,13 +61,12 @@ /* * Calculate the number of buffers needed per port */ -#define NUM_MBUFS_PER_PORT ((MAX_QUEUES*RTE_TEST_RX_DESC_DEFAULT) + \ +#define NUM_MBUFS_PER_PORT ((MAX_QUEUES*RTE_TEST_RX_DESC_DEFAULT) + \ (num_switching_cores*MAX_PKT_BURST) + \ (num_switching_cores*RTE_TEST_TX_DESC_DEFAULT) +\ (num_switching_cores*MBUF_CACHE_SIZE)) #define MBUF_CACHE_SIZE 64 -#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) /* * RX and TX Prefetch, Host, and Write-back threshold values should be @@ -88,9 +87,9 @@ #define TX_HTHRESH 0 /* Default values of TX host threshold reg. */ #define TX_WTHRESH 0 /* Default values of TX write-back threshold reg. */ -#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */ -#define MAX_MRG_PKT_BURST 16 /* Max burst for merge buffers. Set to 1 due to performance issue. */ -#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ +#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */ +#define MAX_MRG_PKT_BURST 16 /* Max burst for merge buffers. Set to 1 due to performance issue. */ +#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ /* State of virtio device. */ #define DEVICE_NOT_READY 0 @@ -135,31 +134,6 @@ static uint32_t enable_vm2vm = 1; /* Enable stats. */ static uint32_t enable_stats = 0; -/* Default configuration for rx and tx thresholds etc. */ -static const struct rte_eth_rxconf rx_conf_default = { - .rx_thresh = { - .pthresh = RX_PTHRESH, - .hthresh = RX_HTHRESH, - .wthresh = RX_WTHRESH, - }, - .rx_drop_en = 1, -}; - -/* - * These default values are optimized for use with the Intel(R) 82599 10 GbE - * Controller and the DPDK ixgbe/igb PMD. Consider using other values for other - * network controllers and/or network drivers. - */ -static const struct rte_eth_txconf tx_conf_default = { - .tx_thresh = { - .pthresh = TX_PTHRESH, - .hthresh = TX_HTHRESH, - .wthresh = TX_WTHRESH, - }, - .tx_free_thresh = 0, /* Use PMD default values */ - .tx_rs_thresh = 0, /* Use PMD default values */ -}; - /* empty vmdq configuration structure. Filled in programatically */ static const struct rte_eth_conf vmdq_conf_default = { .rxmode = { @@ -301,6 +275,7 @@ static inline int port_init(uint8_t port, struct rte_mempool *mbuf_pool) { struct rte_eth_dev_info dev_info; + struct rte_eth_rxconf *rxconf; struct rte_eth_conf port_conf; uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count(); const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT, tx_ring_size = RTE_TEST_TX_DESC_DEFAULT; @@ -331,17 +306,21 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool) if (retval != 0) return retval; + rte_eth_dev_info_get(port, &dev_info); + rxconf = &dev_info.default_rxconf; + rxconf->rx_drop_en = 1; /* Setup the queues. 
*/ for (q = 0; q < rx_rings; q ++) { retval = rte_eth_rx_queue_setup(port, q, rx_ring_size, - rte_eth_dev_socket_id(port), &rx_conf_default, + rte_eth_dev_socket_id(port), rxconf, mbuf_pool); if (retval < 0) return retval; } for (q = 0; q < tx_rings; q ++) { retval = rte_eth_tx_queue_setup(port, q, tx_ring_size, - rte_eth_dev_socket_id(port), &tx_conf_default); + rte_eth_dev_socket_id(port), + NULL); if (retval < 0) return retval; } @@ -527,32 +506,6 @@ static unsigned check_ports_num(unsigned nb_ports) return valid_num_ports; } -/* - * Macro to print out packet contents. Wrapped in debug define so that the - * data path is not effected when debug is disabled. - */ -#ifdef DEBUG -#define PRINT_PACKET(device, addr, size, header) do { \ - char *pkt_addr = (char*)(addr); \ - unsigned int index; \ - char packet[MAX_PRINT_BUFF]; \ - \ - if ((header)) \ - snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Header size %d: ", (device->device_fh), (size)); \ - else \ - snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Packet size %d: ", (device->device_fh), (size)); \ - for (index = 0; index < (size); index++) { \ - snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), \ - "%02hhx ", pkt_addr[index]); \ - } \ - snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), "\n"); \ - \ - LOG_DEBUG(VHOST_DATA, "%s", packet); \ -} while(0) -#else -#define PRINT_PACKET(device, addr, size, header) do{} while(0) -#endif - /* * Function to convert guest physical addresses to vhost virtual addresses. This * is used to convert virtio buffer addresses. @@ -572,7 +525,7 @@ gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa) break; } } - LOG_DEBUG(VHOST_DATA, "(%"PRIu64") GPA %p| VVA %p\n", + RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") GPA %p| VVA %p\n", dev->device_fh, (void*)(uintptr_t)guest_pa, (void*)(uintptr_t)vhost_va); return vhost_va; @@ -600,8 +553,9 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count) uint16_t res_base_idx, res_end_idx; uint16_t free_entries; uint8_t success = 0; + void *userdata; - LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh); + RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_rx()\n", dev->device_fh); vq = dev->virtqueue_rx; count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count; /* As many data cores may want access to available buffers, they need to be reserved. */ @@ -626,7 +580,8 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count) res_end_idx); } while (unlikely(success == 0)); res_cur_idx = res_base_idx; - LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx); + RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") Current Index %d| End Index %d\n", + dev->device_fh, res_cur_idx, res_end_idx); /* Prefetch available ring to retrieve indexes. */ rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]); @@ -677,13 +632,14 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count) vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len; /* Copy mbuf data to buffer */ - rte_memcpy((void *)(uintptr_t)buff_addr, (const void*)buff->data, rte_pktmbuf_data_len(buff)); + userdata = rte_pktmbuf_mtod(buff, void *); + rte_memcpy((void *)(uintptr_t)buff_addr, userdata, rte_pktmbuf_data_len(buff)); res_cur_idx++; packet_success++; /* mergeable is disabled then a header is required per buffer. 
*/ - rte_memcpy((void *)(uintptr_t)buff_hdr_addr, (const void*)&virtio_hdr, vq->vhost_hlen); + rte_memcpy((void *)(uintptr_t)buff_hdr_addr, (const void *)&virtio_hdr, vq->vhost_hlen); if (res_cur_idx < res_end_idx) { /* Prefetch descriptor index. */ rte_prefetch0(&vq->desc[head[packet_success]]); @@ -709,7 +665,7 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count) static inline int __attribute__((always_inline)) ether_addr_cmp(struct ether_addr *ea, struct ether_addr *eb) { - return (((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0); + return ((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0; } /* @@ -746,7 +702,7 @@ link_vmdq(struct virtio_net *dev) /* Register the MAC address. */ ret = rte_eth_dev_mac_addr_add(ports[0], &dev->mac_address, (uint32_t)dev->device_fh); - if (ret) { + if (ret) { RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n", dev->device_fh); return -1; @@ -808,7 +764,7 @@ virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m) struct ether_hdr *pkt_hdr; uint64_t ret = 0; - pkt_hdr = (struct ether_hdr *)m->data; + pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *); /*get the used devices list*/ dev_ll = ll_root_used; @@ -819,17 +775,22 @@ virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m) /* Drop the packet if the TX packet is destined for the TX device. */ if (dev_ll->dev->device_fh == dev->device_fh) { - LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n", - dev_ll->dev->device_fh); + RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: " + "Source and destination MAC addresses are the same. " + "Dropping packet.\n", + dev_ll->dev->device_fh); return 0; } - LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is local\n", dev_ll->dev->device_fh); + RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: " + "MAC address is local\n", dev_ll->dev->device_fh); if (dev_ll->dev->remove) { /*drop the packet if the device is marked for removal*/ - LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Device is marked for removal\n", dev_ll->dev->device_fh); + RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") " + "Device is marked for removal\n", + dev_ll->dev->device_fh); } else { /*send the packet to the local virtio device*/ ret = virtio_dev_rx(dev_ll->dev, &m, 1); @@ -868,7 +829,8 @@ virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool * return; } - LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh); + RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: " + "MAC address is external\n", dev->device_fh); /*Add packet to the port tx queue*/ tx_q = &lcore_tx_queue[lcore_id]; @@ -883,18 +845,20 @@ virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool * mbuf->pkt_len = mbuf->data_len; /* Copy ethernet header to mbuf. */ - rte_memcpy((void*)mbuf->data, (const void*)m->data, ETH_HLEN); + rte_memcpy(rte_pktmbuf_mtod(mbuf, void*), + rte_pktmbuf_mtod(m, const void*), ETH_HLEN); /* Setup vlan header. Bytes need to be re-ordered for network with htons()*/ - vlan_hdr = (struct vlan_ethhdr *) mbuf->data; + vlan_hdr = rte_pktmbuf_mtod(mbuf, struct vlan_ethhdr *); vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto; vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); vlan_hdr->h_vlan_TCI = htons(vlan_tag); /* Copy the remaining packet contents to the mbuf. 
*/ - rte_memcpy((void*) ((uint8_t*)mbuf->data + VLAN_ETH_HLEN), - (const void*) ((uint8_t*)m->data + ETH_HLEN), (m->data_len - ETH_HLEN)); + rte_memcpy(rte_pktmbuf_mtod_offset(mbuf, void *, VLAN_ETH_HLEN), + rte_pktmbuf_mtod_offset(m, const void *, ETH_HLEN), + (m->data_len - ETH_HLEN)); tx_q->m_table[len] = mbuf; len++; if (enable_stats) { @@ -939,7 +903,8 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool) if (vq->last_used_idx == avail_idx) return; - LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh); + RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_tx()\n", + dev->device_fh); /* Prefetch available ring to retrieve head indexes. */ rte_prefetch0(&vq->avail->ring[vq->last_used_idx & (vq->size - 1)]); @@ -948,7 +913,8 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool) free_entries = avail_idx - vq->last_used_idx; free_entries = unlikely(free_entries < MAX_PKT_BURST) ? free_entries : MAX_PKT_BURST; - LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n", dev->device_fh, free_entries); + RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") Buffers available %d\n", + dev->device_fh, free_entries); /* Retrieve all of the head indexes first to avoid caching issues. */ for (i = 0; i < free_entries; i++) head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)]; @@ -981,7 +947,7 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool) /* Setup dummy mbuf. This is copied to a real mbuf if transmitted out the physical port. */ m.data_len = desc->len; - m.data = (void*)(uintptr_t)buff_addr; + m.data_off = 0; m.nb_segs = 1; virtio_tx_route(dev, &m, mbuf_pool, 0); @@ -1037,7 +1003,9 @@ switch_worker(__attribute__((unused)) void *arg) if (unlikely(diff_tsc > drain_tsc)) { if (tx_q->len) { - LOG_DEBUG(VHOST_DATA, "TX queue drained after timeout with burst size %u \n", tx_q->len); + RTE_LOG(DEBUG, VHOST_DATA, + "TX queue drained after timeout with burst size %u\n", + tx_q->len); /*Tx any packets in the queue*/ ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id, @@ -1201,7 +1169,7 @@ alloc_data_ll(uint32_t size) } ll_new[i].next = NULL; - return (ll_new); + return ll_new; } /* @@ -1443,7 +1411,7 @@ int init_virtio_net(struct virtio_net_device_ops const * const ops); * device is also registered here to handle the IOCTLs. */ int -MAIN(int argc, char *argv[]) +main(int argc, char *argv[]) { struct rte_mempool *mbuf_pool; unsigned lcore_id, core_id = 0; @@ -1451,6 +1419,7 @@ MAIN(int argc, char *argv[]) int ret; uint8_t portid; static pthread_t tid; + char thread_name[RTE_MAX_THREAD_NAME_LEN]; /* init EAL */ ret = rte_eal_init(argc, argv); @@ -1464,9 +1433,6 @@ MAIN(int argc, char *argv[]) if (ret < 0) rte_exit(EXIT_FAILURE, "Invalid argument\n"); - if (rte_eal_pci_probe() != 0) - rte_exit(EXIT_FAILURE, "Error with NIC driver initialization\n"); - for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++) if (rte_lcore_is_enabled(lcore_id)) lcore_ids[core_id ++] = lcore_id; @@ -1479,13 +1445,11 @@ MAIN(int argc, char *argv[]) /* Get the number of physical ports. 
*/ nb_ports = rte_eth_dev_count(); - if (nb_ports > RTE_MAX_ETHPORTS) - nb_ports = RTE_MAX_ETHPORTS; /* - * Update the global var NUM_PORTS and global array PORTS - * and get value of var VALID_NUM_PORTS according to system ports number - */ + * Update the global var NUM_PORTS and global array PORTS + * and get value of var VALID_NUM_PORTS according to system ports number + */ valid_num_ports = check_ports_num(nb_ports); if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) { @@ -1495,18 +1459,12 @@ MAIN(int argc, char *argv[]) } /* Create the mbuf pool. */ - mbuf_pool = rte_mempool_create("MBUF_POOL", NUM_MBUFS_PER_PORT * valid_num_ports, - MBUF_SIZE, MBUF_CACHE_SIZE, - sizeof(struct rte_pktmbuf_pool_private), - rte_pktmbuf_pool_init, NULL, - rte_pktmbuf_init, NULL, - rte_socket_id(), 0); + mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", + NUM_MBUFS_PER_PORT * valid_num_ports, MBUF_CACHE_SIZE, 0, + RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id()); if (mbuf_pool == NULL) rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n"); - /* Set log level. */ - rte_set_log_level(LOG_LEVEL); - /* initialize all ports */ for (portid = 0; portid < nb_ports; portid++) { /* skip ports that are not enabled */ @@ -1526,8 +1484,19 @@ MAIN(int argc, char *argv[]) memset(&dev_statistics, 0, sizeof(dev_statistics)); /* Enable stats if the user option is set. */ - if (enable_stats) - pthread_create(&tid, NULL, (void*)print_stats, NULL ); + if (enable_stats) { + ret = pthread_create(&tid, NULL, (void *)print_stats, NULL); + if (ret != 0) + rte_exit(EXIT_FAILURE, + "Cannot create print-stats thread\n"); + + /* Set thread_name for aid in debugging. */ + snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-xen-stats"); + ret = rte_thread_setname(tid, thread_name); + if (ret != 0) + RTE_LOG(ERR, VHOST_CONFIG, + "Cannot set print-stats name\n"); + } /* Launch all data cores. */ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
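
Taken together, the hunks above move the example off the hard-coded rx_conf_default/tx_conf_default structures and the raw rte_mempool_create() call, and onto the PMD-supplied queue defaults plus the rte_pktmbuf_pool_create() helper. Below is a minimal sketch of the resulting initialization pattern, assuming DPDK 2.x-era ethdev/mbuf APIs; the function and constant names (example_port_init, example_pool_create, NB_MBUFS, RX_RING_SIZE, TX_RING_SIZE, CACHE_SIZE) are illustrative and not taken from the patch.

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define RX_RING_SIZE 128   /* illustrative; plays the role of RTE_TEST_RX_DESC_DEFAULT */
#define TX_RING_SIZE 512   /* illustrative; plays the role of RTE_TEST_TX_DESC_DEFAULT */
#define NB_MBUFS     8192  /* illustrative pool size */
#define CACHE_SIZE   64

static int
example_port_init(uint8_t port, struct rte_mempool *pool,
		const struct rte_eth_conf *port_conf)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf *rxconf;
	uint16_t q, rx_rings = 1, tx_rings = 1;
	int ret;

	ret = rte_eth_dev_configure(port, rx_rings, tx_rings, port_conf);
	if (ret != 0)
		return ret;

	/* Start from the PMD's own defaults and only override what is needed. */
	rte_eth_dev_info_get(port, &dev_info);
	rxconf = &dev_info.default_rxconf;
	rxconf->rx_drop_en = 1;

	for (q = 0; q < rx_rings; q++) {
		ret = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
				rte_eth_dev_socket_id(port), rxconf, pool);
		if (ret < 0)
			return ret;
	}
	for (q = 0; q < tx_rings; q++) {
		/* Passing NULL lets the PMD pick its default TX thresholds. */
		ret = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
				rte_eth_dev_socket_id(port), NULL);
		if (ret < 0)
			return ret;
	}
	return rte_eth_dev_start(port);
}

/* Pool creation via the helper that replaces the raw rte_mempool_create(). */
static struct rte_mempool *
example_pool_create(void)
{
	return rte_pktmbuf_pool_create("MBUF_POOL", NB_MBUFS, CACHE_SIZE,
			0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
}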
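
On the data path, the patch stops dereferencing m->data directly (a field the mbuf rework replaced with a data_off offset) and goes through the accessor macros instead. A small illustration of the same accessors, assuming single-segment mbufs that each hold at least an Ethernet header; copy_l2_header() is a hypothetical helper, not code from the example.

#include <rte_ether.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>

/*
 * Copy the Ethernet header of src into dst and return a pointer to the
 * bytes that follow it in dst. Purely illustrative.
 */
static void *
copy_l2_header(struct rte_mbuf *dst, struct rte_mbuf *src)
{
	struct ether_hdr *dst_eth = rte_pktmbuf_mtod(dst, struct ether_hdr *);
	struct ether_hdr *src_eth = rte_pktmbuf_mtod(src, struct ether_hdr *);

	/* rte_pktmbuf_mtod() resolves to buf_addr + data_off, replacing m->data. */
	rte_memcpy(dst_eth, src_eth, sizeof(struct ether_hdr));

	/* rte_pktmbuf_mtod_offset() replaces (uint8_t *)m->data + offset. */
	return rte_pktmbuf_mtod_offset(dst, void *, sizeof(struct ether_hdr));
}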
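
Finally, the example's private LOG_DEBUG() wrapper gives way to plain RTE_LOG(DEBUG, ...) and the stats thread is now created with error checking and a name that shows up in debuggers. A sketch of that pattern, assuming a log type aliased onto RTE_LOGTYPE_USER1 (the vhost examples alias VHOST_DATA/VHOST_CONFIG the same way); spawn_named_stats_thread() and this print_stats() are hypothetical stand-ins.

#include <pthread.h>
#include <stdio.h>
#include <rte_lcore.h>
#include <rte_log.h>

#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1

static void *
print_stats(__attribute__((unused)) void *arg)
{
	RTE_LOG(DEBUG, APP, "stats thread running\n");
	return NULL;
}

static int
spawn_named_stats_thread(pthread_t *tid)
{
	char thread_name[RTE_MAX_THREAD_NAME_LEN];
	int ret;

	ret = pthread_create(tid, NULL, print_stats, NULL);
	if (ret != 0)
		return ret;

	/* Naming the thread is best effort: a failure is only logged. */
	snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-xen-stats");
	ret = rte_thread_setname(*tid, thread_name);
	if (ret != 0)
		RTE_LOG(ERR, APP, "Cannot set stats thread name\n");

	return 0;
}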