diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 6eab664ddf..22d6a4b92a 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -49,12 +49,11 @@
 #include 
 #include 
 #include 
+#include 
 #include "main.h"
-#include "virtio-net.h"
-#include "vhost-net-cdev.h"
 
-#define MAX_QUEUES 128
+#define MAX_QUEUES 512
 
 /* the maximum number of external ports supported */
 #define MAX_SUP_PORTS 1
@@ -68,7 +67,7 @@
 						(num_switching_cores*MBUF_CACHE_SIZE))
 
 #define MBUF_CACHE_SIZE 128
-#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATA_SIZE (2048 + RTE_PKTMBUF_HEADROOM)
 
 /*
  * No frame data buffer allocated from host are required for zero copy
@@ -76,31 +75,10 @@
  * directly use it.
  */
 #define VIRTIO_DESCRIPTOR_LEN_ZCP 1518
-#define MBUF_SIZE_ZCP (VIRTIO_DESCRIPTOR_LEN_ZCP + sizeof(struct rte_mbuf) \
-	+ RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATA_SIZE_ZCP (VIRTIO_DESCRIPTOR_LEN_ZCP + RTE_PKTMBUF_HEADROOM)
 #define MBUF_CACHE_SIZE_ZCP 0
 
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /* Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /* Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /* Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /* Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /* Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /* Default values of TX write-back threshold reg. */
-
 #define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
-#define MAX_MRG_PKT_BURST 16 /* Max burst for merge buffers. Set to 1 due to performance issue. */
 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
 
 #define BURST_RX_WAIT_US 15 /* Defines how long we wait between retries on RX */
@@ -158,23 +136,32 @@
 #define MAC_ADDR_CMP 0xFFFFFFFFFFFFULL
 
 /* Number of descriptors per cacheline. */
-#define DESC_PER_CACHELINE (CACHE_LINE_SIZE / sizeof(struct vring_desc))
+#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
+
+#define MBUF_EXT_MEM(mb) (RTE_MBUF_FROM_BADDR((mb)->buf_addr) != (mb))
 
 /* mask of enabled ports */
 static uint32_t enabled_port_mask = 0;
 
+/* Promiscuous mode */
+static uint32_t promiscuous;
+
 /*Number of switching cores enabled*/
 static uint32_t num_switching_cores = 0;
 
 /* number of devices/queues to support*/
 static uint32_t num_queues = 0;
-uint32_t num_devices = 0;
+static uint32_t num_devices;
 
 /*
  * Enable zero copy, pkts buffer will directly dma to hw descriptor,
  * disabled on default.
  */
 static uint32_t zero_copy;
+static int mergeable;
+
+/* Do vlan strip on host, enabled on default */
+static uint32_t vlan_strip = 1;
 
 /* number of descriptors to apply*/
 static uint32_t num_rx_descriptor = RTE_TEST_RX_DESC_DEFAULT_ZCP;
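
The hunk above also renames CACHE_LINE_SIZE to RTE_CACHE_LINE_SIZE, following the RTE_ prefixing of the EAL macros; the value itself is unchanged. A minimal sketch of the DESC_PER_CACHELINE arithmetic, assuming a 64-byte cache line and the standard 16-byte vring descriptor (the _SKETCH names are illustrative, not from the patch):

	#include <stdint.h>

	/* Same layout as struct vring_desc: 8 + 4 + 2 + 2 = 16 bytes. */
	struct vring_desc_sketch {
		uint64_t addr;
		uint32_t len;
		uint16_t flags;
		uint16_t next;
	};

	/* 64 / 16 == 4: the forwarding loop batches descriptors in fours,
	 * so one cache-line fill serves four ring entries. */
	enum { DESC_PER_CACHELINE_SKETCH = 64 / sizeof(struct vring_desc_sketch) };
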
@@ -218,37 +205,6 @@ static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
 /* Character device basename. Can be set by user. */
 static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";
 
-/* Charater device index. Can be set by user. */
-static uint32_t dev_index = 0;
-
-/* This can be set by the user so it is made available here. */
-extern uint64_t VHOST_FEATURES;
-
-/* Default configuration for rx and tx thresholds etc. */
-static struct rte_eth_rxconf rx_conf_default = {
-	.rx_thresh = {
-		.pthresh = RX_PTHRESH,
-		.hthresh = RX_HTHRESH,
-		.wthresh = RX_WTHRESH,
-	},
-	.rx_drop_en = 1,
-};
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe/igb PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-static struct rte_eth_txconf tx_conf_default = {
-	.tx_thresh = {
-		.pthresh = TX_PTHRESH,
-		.hthresh = TX_HTHRESH,
-		.wthresh = TX_WTHRESH,
-	},
-	.tx_free_thresh = 0, /* Use PMD default values */
-	.tx_rs_thresh = 0, /* Use PMD default values */
-};
-
 /* empty vmdq configuration structure. Filled in programatically */
 static struct rte_eth_conf vmdq_conf_default = {
 	.rxmode = {
@@ -288,6 +244,9 @@ static struct rte_eth_conf vmdq_conf_default = {
 static unsigned lcore_ids[RTE_MAX_LCORE];
 static uint8_t ports[RTE_MAX_ETHPORTS];
 static unsigned num_ports = 0; /**< The number of ports specified in command line */
+static uint16_t num_pf_queues, num_vmdq_queues;
+static uint16_t vmdq_pool_base, vmdq_queue_base;
+static uint16_t queues_per_pool;
 
 static const uint16_t external_pkt_default_vlan_tag = 2000;
 const uint16_t vlan_tags[] = {
@@ -370,13 +329,15 @@ static inline int
 get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
 {
 	struct rte_eth_vmdq_rx_conf conf;
+	struct rte_eth_vmdq_rx_conf *def_conf =
+		&vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
 	unsigned i;
 
 	memset(&conf, 0, sizeof(conf));
 	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
 	conf.nb_pool_maps = num_devices;
-	conf.enable_loop_back =
-		vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back;
+	conf.enable_loop_back = def_conf->enable_loop_back;
+	conf.rx_mode = def_conf->rx_mode;
 
 	for (i = 0; i < conf.nb_pool_maps; i++) {
 		conf.pool_map[i].vlan_id = vlan_tags[ i ];
@@ -413,7 +374,9 @@ port_init(uint8_t port)
 {
 	struct rte_eth_dev_info dev_info;
 	struct rte_eth_conf port_conf;
-	uint16_t rx_rings, tx_rings;
+	struct rte_eth_rxconf *rxconf;
+	struct rte_eth_txconf *txconf;
+	int16_t rx_rings, tx_rings;
 	uint16_t rx_ring_size, tx_ring_size;
 	int retval;
 	uint16_t q;
@@ -421,9 +384,32 @@ port_init(uint8_t port)
 	/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
 	rte_eth_dev_info_get (port, &dev_info);
+	if (dev_info.max_rx_queues > MAX_QUEUES) {
+		rte_exit(EXIT_FAILURE,
+			"please define MAX_QUEUES no less than %u in %s\n",
+			dev_info.max_rx_queues, __FILE__);
+	}
+
+	rxconf = &dev_info.default_rxconf;
+	txconf = &dev_info.default_txconf;
+	rxconf->rx_drop_en = 1;
+
+	/* Enable vlan offload */
+	txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;
+
+	/*
+	 * Zero copy defers queue RX/TX start to the time when guest
+	 * finishes its startup and packet buffers from that guest are
+	 * available.
+	 */
+	if (zero_copy) {
+		rxconf->rx_deferred_start = 1;
+		rxconf->rx_drop_en = 0;
+		txconf->tx_deferred_start = 1;
+	}
+
 	/*configure the number of supported virtio devices based on VMDQ limits */
 	num_devices = dev_info.max_vmdq_pools;
-	num_queues = dev_info.max_rx_queues;
 
 	if (zero_copy) {
 		rx_ring_size = num_rx_descriptor;
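
Deferred start only postpones queue activation; something still has to start the queues once the guest is up. A minimal sketch of that companion step, assuming the rte_eth_dev_rx_queue_start()/rte_eth_dev_tx_queue_start() calls available in this DPDK generation (the helper name is hypothetical):

	#include <rte_ethdev.h>

	/* Start one deferred RX/TX queue pair once guest buffers exist. */
	static int
	start_deferred_queues(uint8_t port, uint16_t rxq, uint16_t txq)
	{
		int ret = rte_eth_dev_rx_queue_start(port, rxq);

		if (ret == 0)
			ret = rte_eth_dev_tx_queue_start(port, txq);
		return ret;
	}
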
@@ -443,10 +429,19 @@
 	retval = get_eth_conf(&port_conf, num_devices);
 	if (retval < 0)
 		return retval;
+	/* NIC queues are divided into pf queues and vmdq queues. */
+	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
+	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
+	num_vmdq_queues = num_devices * queues_per_pool;
+	num_queues = num_pf_queues + num_vmdq_queues;
+	vmdq_queue_base = dev_info.vmdq_queue_base;
+	vmdq_pool_base = dev_info.vmdq_pool_base;
+	printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
+		num_pf_queues, num_devices, queues_per_pool);
 
 	if (port >= rte_eth_dev_count()) return -1;
 
-	rx_rings = (uint16_t)num_queues,
+	rx_rings = (uint16_t)dev_info.max_rx_queues;
 	/* Configure ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
 	if (retval != 0)
@@ -455,14 +450,16 @@
 	/* Setup the queues. */
 	for (q = 0; q < rx_rings; q ++) {
 		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
-						rte_eth_dev_socket_id(port), &rx_conf_default,
+						rte_eth_dev_socket_id(port),
+						rxconf,
 						vpool_array[q].pool);
 		if (retval < 0)
 			return retval;
 	}
 	for (q = 0; q < tx_rings; q ++) {
 		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
-						rte_eth_dev_socket_id(port), &tx_conf_default);
+						rte_eth_dev_socket_id(port),
+						txconf);
 		if (retval < 0)
 			return retval;
 	}
@@ -474,6 +471,9 @@
 		return retval;
 	}
 
+	if (promiscuous)
+		rte_eth_promiscuous_enable(port);
+
 	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
 	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
 	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
@@ -560,7 +560,7 @@ us_vhost_usage(const char *prgname)
 	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
 	"		--vm2vm [0|1|2]\n"
 	"		--rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
-	"		--dev-basename --dev-index [0-N]\n"
+	"		--dev-basename \n"
 	"		--nb-devices ND\n"
 	"		-p PORTMASK: Set mask for ports to be used by application\n"
 	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
@@ -568,9 +568,9 @@ us_vhost_usage(const char *prgname)
 	"		--rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\n"
 	"		--rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\n"
 	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
+	"		--vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n"
 	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
 	"		--dev-basename: The basename to be used for the character device.\n"
-	"		--dev-index [0-N]: Defaults to zero if not used. Index is appended to basename.\n"
 	"		--zero-copy [0|1]: disable(default)/enable rx/tx "
 			"zero copy\n"
 	"		--rx-desc-num [0-N]: the number of descriptors on rx, "
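
Together with the new -P short option parsed below, the usage text now reflects promiscuous VMDq pools and host VLAN stripping. An illustrative invocation (the vhost-switch binary name and the EAL flags are assumptions, not part of this hunk):

	./build/vhost-switch -c 0xf -n 4 -- -p 0x1 -P --mergeable 1 --vlan-strip 1 --dev-basename vhost-net

-P switches the VMDq RX mode to accept broadcast and multicast and advertises VIRTIO_NET_F_CTRL_RX to the guest.
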
@@ -596,9 +596,9 @@
 		{"rx-retry-delay", required_argument, NULL, 0},
 		{"rx-retry-num", required_argument, NULL, 0},
 		{"mergeable", required_argument, NULL, 0},
+		{"vlan-strip", required_argument, NULL, 0},
 		{"stats", required_argument, NULL, 0},
 		{"dev-basename", required_argument, NULL, 0},
-		{"dev-index", required_argument, NULL, 0},
 		{"zero-copy", required_argument, NULL, 0},
 		{"rx-desc-num", required_argument, NULL, 0},
 		{"tx-desc-num", required_argument, NULL, 0},
@@ -606,7 +606,8 @@
 	};
 
 	/* Parse command line */
-	while ((opt = getopt_long(argc, argv, "p:",long_option, &option_index)) != EOF) {
+	while ((opt = getopt_long(argc, argv, "p:P",
+			long_option, &option_index)) != EOF) {
 		switch (opt) {
 		/* Portmask */
 		case 'p':
@@ -618,6 +619,15 @@
 			}
 			break;
 
+		case 'P':
+			promiscuous = 1;
+			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
+				ETH_VMDQ_ACCEPT_BROADCAST |
+				ETH_VMDQ_ACCEPT_MULTICAST;
+			rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX);
+
+			break;
+
 		case 0:
 			/* Enable/disable vm2vm comms. */
 			if (!strncmp(long_option[option_index].name, "vm2vm",
@@ -678,15 +688,31 @@
 				us_vhost_usage(prgname);
 				return -1;
 			} else {
+				mergeable = !!ret;
 				if (ret) {
 					vmdq_conf_default.rxmode.jumbo_frame = 1;
 					vmdq_conf_default.rxmode.max_rx_pkt_len
 						= JUMBO_FRAME_MAX_SIZE;
-					VHOST_FEATURES = (1ULL << VIRTIO_NET_F_MRG_RXBUF);
 				}
 			}
 		}
 
+		/* Enable/disable RX VLAN strip on host. */
+		if (!strncmp(long_option[option_index].name,
+			"vlan-strip", MAX_LONG_OPT_SZ)) {
+			ret = parse_num_opt(optarg, 1);
+			if (ret == -1) {
+				RTE_LOG(INFO, VHOST_CONFIG,
+					"Invalid argument for VLAN strip [0|1]\n");
+				us_vhost_usage(prgname);
+				return -1;
+			} else {
+				vlan_strip = !!ret;
+				vmdq_conf_default.rxmode.hw_vlan_strip =
+					vlan_strip;
+			}
+		}
+
 		/* Enable/disable stats. */
 		if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
 			ret = parse_num_opt(optarg, INT32_MAX);
@@ -708,17 +734,6 @@
 			}
 		}
 
-		/* Set character device index. */
-		if (!strncmp(long_option[option_index].name, "dev-index", MAX_LONG_OPT_SZ)) {
-			ret = parse_num_opt(optarg, INT32_MAX);
-			if (ret == -1) {
-				RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for character device index [0..N]\n");
-				us_vhost_usage(prgname);
-				return -1;
-			} else
-				dev_index = ret;
-		}
-
 		/* Enable/disable rx/tx zero copy. */
 		if (!strncmp(long_option[option_index].name,
 			"zero-copy", MAX_LONG_OPT_SZ)) {
@@ -731,19 +746,6 @@
 				return -1;
 			} else
 				zero_copy = ret;
-
-			if (zero_copy) {
-#ifdef RTE_MBUF_REFCNT
-				RTE_LOG(ERR, VHOST_CONFIG, "Before running "
-				"zero copy vhost APP, please "
-				"disable RTE_MBUF_REFCNT\n"
-				"in config file and then rebuild DPDK "
-				"core lib!\n"
-				"Otherwise please disable zero copy "
-				"flag in command line!\n");
-				return -1;
-#endif
-			}
 		}
 
 		/* Specify the descriptor number on RX. */
@@ -950,13 +952,16 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
 				vdev->vlan_tag);
 
 	/* Register the MAC address. */
-	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address, (uint32_t)dev->device_fh);
+	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
+				(uint32_t)dev->device_fh + vmdq_pool_base);
 	if (ret)
 		RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",
					dev->device_fh);
 
 	/* Enable stripping of the vlan tag as we handle routing. */
-	rte_eth_dev_set_vlan_strip_on_queue(ports[0], (uint16_t)vdev->vmdq_rx_q, 1);
+	if (vlan_strip)
+		rte_eth_dev_set_vlan_strip_on_queue(ports[0],
+			(uint16_t)vdev->vmdq_rx_q, 1);
 
 	/* Set device as ready for RX. */
 	vdev->ready = DEVICE_RX;
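
The vmdq_pool_base offset added to the MAC registration matters on NICs whose VMDq pools do not start at index 0 because the PF owns the first pool(s). Worked arithmetic with hypothetical values, not taken from the patch:

	/* Illustrative only:
	 *   vmdq_pool_base = 1    (pool 0 belongs to the PF)
	 *   device_fh      = 2    (third guest to attach)
	 *   pool index     = device_fh + vmdq_pool_base = 3
	 * Binding the guest MAC to pool 3 instead of pool 2 keeps its
	 * traffic out of a pool the PF or another guest already uses.
	 */
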
@@ -1003,7 +1008,7 @@ unlink_vmdq(struct vhost_dev *vdev)
  * Check if the packet destination MAC address is for a local device. If so then put
  * the packet on that devices RX queue. If not then return.
  */
-static inline unsigned __attribute__((always_inline))
+static inline int __attribute__((always_inline))
 virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
 {
 	struct virtio_net_data_ll *dev_ll;
@@ -1032,21 +1037,12 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
 			LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is local\n", tdev->device_fh);
 
-			if (dev_ll->vdev->remove) {
+			if (unlikely(dev_ll->vdev->remove)) {
 				/*drop the packet if the device is marked for removal*/
 				LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Device is marked for removal\n", tdev->device_fh);
 			} else {
-				uint32_t mergeable =
-					dev_ll->dev->features &
-					(1 << VIRTIO_NET_F_MRG_RXBUF);
-
 				/*send the packet to the local virtio device*/
-				if (likely(mergeable == 0))
-					ret = virtio_dev_rx(dev_ll->dev, &m, 1);
-				else
-					ret = virtio_dev_merge_rx(dev_ll->dev,
-						&m, 1);
-
+				ret = rte_vhost_enqueue_burst(tdev, VIRTIO_RXQ, &m, 1);
 				if (enable_stats) {
 					rte_atomic64_add(
					&dev_statistics[tdev->device_fh].rx_total_atomic,
@@ -1067,58 +1063,81 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
 	return -1;
 }
 
+/*
+ * Check if the destination MAC of a packet is one local VM,
+ * and get its vlan tag, and offset if it is.
+ */
+static inline int __attribute__((always_inline))
+find_local_dest(struct virtio_net *dev, struct rte_mbuf *m,
+	uint32_t *offset, uint16_t *vlan_tag)
+{
+	struct virtio_net_data_ll *dev_ll = ll_root_used;
+	struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+
+	while (dev_ll != NULL) {
+		if ((dev_ll->vdev->ready == DEVICE_RX)
+			&& ether_addr_cmp(&(pkt_hdr->d_addr),
+				&dev_ll->vdev->mac_address)) {
+			/*
+			 * Drop the packet if the TX packet is
+			 * destined for the TX device.
+			 */
+			if (dev_ll->vdev->dev->device_fh == dev->device_fh) {
+				LOG_DEBUG(VHOST_DATA,
+				"(%"PRIu64") TX: Source and destination"
+				" MAC addresses are the same. Dropping "
+				"packet.\n",
+				dev_ll->vdev->dev->device_fh);
+				return -1;
+			}
+
+			/*
+			 * HW vlan strip will reduce the packet length
+			 * by minus length of vlan tag, so need restore
+			 * the packet length by plus it.
+			 */
+			*offset = VLAN_HLEN;
+			*vlan_tag =
+				(uint16_t)
+				vlan_tags[(uint16_t)dev_ll->vdev->dev->device_fh];
+
+			LOG_DEBUG(VHOST_DATA,
+				"(%"PRIu64") TX: pkt to local VM device id:"
+				"(%"PRIu64") vlan tag: %d.\n",
+				dev->device_fh, dev_ll->vdev->dev->device_fh,
+				vlan_tag);
+
+			break;
+		}
+		dev_ll = dev_ll->next;
+	}
+	return 0;
+}
+
 /*
  * This function routes the TX packet to the correct interface. This may be a local device
  * or the physical port.
  */
 static inline void __attribute__((always_inline))
-virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, struct rte_mempool *mbuf_pool, uint16_t vlan_tag)
+virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
 {
 	struct mbuf_table *tx_q;
-	struct vlan_ethhdr *vlan_hdr;
 	struct rte_mbuf **m_table;
-	struct rte_mbuf *mbuf, *prev;
 	unsigned len, ret, offset = 0;
 	const uint16_t lcore_id = rte_lcore_id();
-	struct virtio_net_data_ll *dev_ll = ll_root_used;
-	struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
 	struct virtio_net *dev = vdev->dev;
+	struct ether_hdr *nh;
 
 	/*check if destination is local VM*/
-	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0))
+	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
+		rte_pktmbuf_free(m);
 		return;
+	}
 
-	if (vm2vm_mode == VM2VM_HARDWARE) {
-		while (dev_ll != NULL) {
-			if ((dev_ll->vdev->ready == DEVICE_RX)
-				&& ether_addr_cmp(&(pkt_hdr->d_addr),
-				&dev_ll->vdev->mac_address)) {
-				/*
-				 * Drop the packet if the TX packet is
-				 * destined for the TX device.
-				 */
-				if (dev_ll->vdev->dev->device_fh == dev->device_fh) {
-					LOG_DEBUG(VHOST_DATA,
-					"(%"PRIu64") TX: Source and destination"
-					" MAC addresses are the same. Dropping "
-					"packet.\n",
-					dev_ll->vdev->device_fh);
-					return;
-				}
-				offset = 4;
-				vlan_tag =
-				(uint16_t)
-				vlan_tags[(uint16_t)dev_ll->vdev->dev->device_fh];
-
-				LOG_DEBUG(VHOST_DATA,
-				"(%"PRIu64") TX: pkt to local VM device id:"
-				"(%"PRIu64") vlan tag: %d.\n",
-				dev->device_fh, dev_ll->vdev->dev->device_fh,
-				vlan_tag);
-
-				break;
-			}
-			dev_ll = dev_ll->next;
+	if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
+		if (unlikely(find_local_dest(dev, m, &offset, &vlan_tag) != 0)) {
+			rte_pktmbuf_free(m);
+			return;
 		}
 	}
 
@@ -1128,58 +1147,40 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, struct rte_mempool *
 	tx_q = &lcore_tx_queue[lcore_id];
 	len = tx_q->len;
 
-	/* Allocate an mbuf and populate the structure. */
-	mbuf = rte_pktmbuf_alloc(mbuf_pool);
-	if (unlikely(mbuf == NULL)) {
-		RTE_LOG(ERR, VHOST_DATA,
-			"Failed to allocate memory for mbuf.\n");
-		return;
-	}
+	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
+	if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
+		/* Guest has inserted the vlan tag. */
+		struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
+		uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
+		if ((vm2vm_mode == VM2VM_HARDWARE) &&
+			(vh->vlan_tci != vlan_tag_be))
+			vh->vlan_tci = vlan_tag_be;
+	} else {
+		m->ol_flags = PKT_TX_VLAN_PKT;
 
-	mbuf->data_len = m->data_len + VLAN_HLEN + offset;
-	mbuf->pkt_len = m->pkt_len + VLAN_HLEN + offset;
-	mbuf->nb_segs = m->nb_segs;
+		/*
+		 * Find the right seg to adjust the data len when offset is
+		 * bigger than tail room size.
+		 */
+		if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
+			if (likely(offset <= rte_pktmbuf_tailroom(m)))
+				m->data_len += offset;
+			else {
+				struct rte_mbuf *seg = m;
 
-	/* Copy ethernet header to mbuf. */
-	rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
-		rte_pktmbuf_mtod(m, const void *),
-		ETH_HLEN);
-
-
-	/* Setup vlan header. Bytes need to be re-ordered for network with htons()*/
-	vlan_hdr = rte_pktmbuf_mtod(mbuf, struct vlan_ethhdr *);
-	vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;
-	vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
-	vlan_hdr->h_vlan_TCI = htons(vlan_tag);
-
-	/* Copy the remaining packet contents to the mbuf. */
-	rte_memcpy((void *)(rte_pktmbuf_mtod(mbuf, uint8_t *) + VLAN_ETH_HLEN),
-		(const void *)(rte_pktmbuf_mtod(m, uint8_t *) + ETH_HLEN),
-		(m->data_len - ETH_HLEN));
-
-	/* Copy the remaining segments for the whole packet. */
-	prev = mbuf;
-	while (m->next) {
-		/* Allocate an mbuf and populate the structure. */
-		struct rte_mbuf *next_mbuf = rte_pktmbuf_alloc(mbuf_pool);
-		if (unlikely(next_mbuf == NULL)) {
-			rte_pktmbuf_free(mbuf);
-			RTE_LOG(ERR, VHOST_DATA,
-				"Failed to allocate memory for mbuf.\n");
-			return;
-		}
+				while ((seg->next != NULL) &&
+					(offset > rte_pktmbuf_tailroom(seg)))
+					seg = seg->next;
 
-		m = m->next;
-		prev->next = next_mbuf;
-		prev = next_mbuf;
-		next_mbuf->data_len = m->data_len;
+				seg->data_len += offset;
+			}
+			m->pkt_len += offset;
+		}
 
-		/* Copy data to next mbuf. */
-		rte_memcpy(rte_pktmbuf_mtod(next_mbuf, void *),
-			rte_pktmbuf_mtod(m, const void *), m->data_len);
+		m->vlan_tci = vlan_tag;
 	}
 
-	tx_q->m_table[len] = mbuf;
+	tx_q->m_table[len] = m;
 	len++;
 	if (enable_stats) {
 		dev_statistics[dev->device_fh].tx_total++;
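
The rewritten TX path stops cloning every routed packet into a fresh mbuf chain just to prepend a VLAN header: it either patches a tag the guest already inserted or asks the NIC to insert one. A minimal sketch of the offload half, using the mbuf fields of this DPDK generation (the helper name is illustrative):

	#include <rte_mbuf.h>

	/* Mark an mbuf so the PMD inserts the 802.1Q tag on transmit,
	 * replacing the old per-packet copy into a vlan_ethhdr. */
	static void
	mark_for_hw_vlan_insert(struct rte_mbuf *m, uint16_t vlan_tag)
	{
		m->ol_flags |= PKT_TX_VLAN_PKT;
		m->vlan_tci = vlan_tag;	/* host byte order */
	}

The copy-based path it replaces cost one mbuf allocation plus an rte_memcpy() per segment for every packet that left the host.
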
@@ -1223,7 +1224,8 @@ switch_worker(__attribute__((unused)) void *arg)
 	const uint16_t lcore_id = rte_lcore_id();
 	const uint16_t num_cores = (uint16_t)rte_lcore_count();
 	uint16_t rx_count = 0;
-	uint32_t mergeable = 0;
+	uint16_t tx_count;
+	uint32_t retry = 0;
 
 	RTE_LOG(INFO, VHOST_DATA, "Procesing on Core %u started\n", lcore_id);
 	lcore_ll = lcore_info[lcore_id].lcore_ll;
@@ -1282,10 +1284,8 @@ switch_worker(__attribute__((unused)) void *arg)
 			/*get virtio device ID*/
 			vdev = dev_ll->vdev;
 			dev = vdev->dev;
-			mergeable =
-				dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF);
 
-			if (vdev->remove) {
+			if (unlikely(vdev->remove)) {
 				dev_ll = dev_ll->next;
 				unlink_vmdq(vdev);
 				vdev->ready = DEVICE_SAFE_REMOVE;
@@ -1297,15 +1297,18 @@ switch_worker(__attribute__((unused)) void *arg)
 					vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
 
 				if (rx_count) {
-					if (likely(mergeable == 0))
-						ret_count =
-							virtio_dev_rx(dev,
-							pkts_burst, rx_count);
-					else
-						ret_count =
-							virtio_dev_merge_rx(dev,
-							pkts_burst, rx_count);
-
+					/*
+					 * Retry is enabled and the queue is full then we wait and retry to avoid packet loss
+					 * Here MAX_PKT_BURST must be less than virtio queue size
+					 */
+					if (enable_retry && unlikely(rx_count > rte_vring_available_entries(dev, VIRTIO_RXQ))) {
+						for (retry = 0; retry < burst_rx_retry_num; retry++) {
+							rte_delay_us(burst_rx_delay_time);
+							if (rx_count <= rte_vring_available_entries(dev, VIRTIO_RXQ))
+								break;
+						}
+					}
+					ret_count = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_burst, rx_count);
 					if (enable_stats) {
 						rte_atomic64_add(
						&dev_statistics[dev_ll->vdev->dev->device_fh].rx_total_atomic,
@@ -1321,12 +1324,18 @@ switch_worker(__attribute__((unused)) void *arg)
 				}
 			}
 
-			if (!vdev->remove) {
-				/*Handle guest TX*/
-				if (likely(mergeable == 0))
-					virtio_dev_tx(dev, mbuf_pool);
-				else
-					virtio_dev_merge_tx(dev, mbuf_pool);
+			if (likely(!vdev->remove)) {
+				/* Handle guest TX*/
+				tx_count = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ, mbuf_pool, pkts_burst, MAX_PKT_BURST);
+				/* If this is the first received packet we need to learn the MAC and setup VMDQ */
+				if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) {
+					if (vdev->remove || (link_vmdq(vdev, pkts_burst[0]) == -1)) {
+						while (tx_count)
+							rte_pktmbuf_free(pkts_burst[--tx_count]);
+					}
+				}
+				while (tx_count)
+					virtio_tx_route(vdev, pkts_burst[--tx_count], (uint16_t)dev->device_fh);
 			}
 
 			/*move to the next device in the list*/
@@ -1424,7 +1433,7 @@ put_desc_to_used_list_zcp(struct vhost_virtqueue *vq, uint16_t desc_idx)
 	/* Kick the guest if necessary. */
 	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
-		eventfd_write((int)vq->kickfd, 1);
+		eventfd_write((int)vq->callfd, 1);
 }
 
 /*
@@ -1580,7 +1589,7 @@ txmbuf_clean_zcp(struct virtio_net *dev, struct vpool *vpool)
 	for (index = 0; index < mbuf_count; index++) {
 		mbuf = __rte_mbuf_raw_alloc(vpool->pool);
-		if (likely(RTE_MBUF_INDIRECT(mbuf)))
+		if (likely(MBUF_EXT_MEM(mbuf)))
 			pktmbuf_detach_zcp(mbuf);
 		rte_ring_sp_enqueue(vpool->ring, mbuf);
@@ -1617,7 +1626,7 @@ txmbuf_clean_zcp(struct virtio_net *dev, struct vpool *vpool)
 
 	/* Kick guest if required. */
 	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
-		eventfd_write((int)vq->kickfd, 1);
+		eventfd_write((int)vq->callfd, 1);
 
 	return 0;
 }
@@ -1643,7 +1652,7 @@ static void mbuf_destroy_zcp(struct vpool *vpool)
 	for (index = 0; index < mbuf_count; index++) {
 		mbuf = __rte_mbuf_raw_alloc(vpool->pool);
 		if (likely(mbuf != NULL)) {
-			if (likely(RTE_MBUF_INDIRECT(mbuf)))
+			if (likely(MBUF_EXT_MEM(mbuf)))
 				pktmbuf_detach_zcp(mbuf);
 			rte_ring_sp_enqueue(vpool->ring, (void *)mbuf);
 		}
@@ -1765,7 +1774,7 @@ virtio_dev_rx_zcp(struct virtio_net *dev, struct rte_mbuf **pkts,
 
 	/* Kick the guest if necessary. */
 	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
-		eventfd_write((int)vq->kickfd, 1);
+		eventfd_write((int)vq->callfd, 1);
 
 	return count;
 }
@@ -1783,8 +1792,6 @@ virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m,
 	struct rte_mbuf *mbuf = NULL;
 	unsigned len, ret, offset = 0;
 	struct vpool *vpool;
-	struct virtio_net_data_ll *dev_ll = ll_root_used;
-	struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
 	uint16_t vlan_tag = (uint16_t)vlan_tags[(uint16_t)dev->device_fh];
 	uint16_t vmdq_rx_q = ((struct vhost_dev *)dev->priv)->vmdq_rx_q;
@@ -1813,46 +1820,10 @@ virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m,
 		 * such a ambiguous situation, so pkt will lost.
 		 */
 		vlan_tag = external_pkt_default_vlan_tag;
-		while (dev_ll != NULL) {
-			if (likely(dev_ll->vdev->ready == DEVICE_RX) &&
-				ether_addr_cmp(&(pkt_hdr->d_addr),
-				&dev_ll->vdev->mac_address)) {
-
-				/*
-				 * Drop the packet if the TX packet is destined
-				 * for the TX device.
-				 */
-				if (unlikely(dev_ll->vdev->dev->device_fh
-					== dev->device_fh)) {
-					LOG_DEBUG(VHOST_DATA,
-					"(%"PRIu64") TX: Source and destination"
-					"MAC addresses are the same. Dropping "
-					"packet.\n",
-					dev_ll->vdev->dev->device_fh);
-					MBUF_HEADROOM_UINT32(mbuf)
-						= (uint32_t)desc_idx;
-					__rte_mbuf_raw_free(mbuf);
-					return;
-				}
-
-				/*
-				 * Packet length offset 4 bytes for HW vlan
-				 * strip when L2 switch back.
-				 */
-				offset = 4;
-				vlan_tag =
-				(uint16_t)
-				vlan_tags[(uint16_t)dev_ll->vdev->dev->device_fh];
-
-				LOG_DEBUG(VHOST_DATA,
-				"(%"PRIu64") TX: pkt to local VM device id:"
-				"(%"PRIu64") vlan tag: %d.\n",
-				dev->device_fh, dev_ll->vdev->dev->device_fh,
-				vlan_tag);
-
-				break;
-			}
-			dev_ll = dev_ll->next;
+		if (find_local_dest(dev, m, &offset, &vlan_tag) != 0) {
+			MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
+			__rte_mbuf_raw_free(mbuf);
+			return;
 		}
 	}
 
@@ -1976,7 +1947,9 @@ virtio_dev_tx_zcp(struct virtio_net *dev)
 
 	/* Buffer address translation. */
 	buff_addr = gpa_to_vva(dev, desc->addr);
-	phys_addr = gpa_to_hpa(vdev, desc->addr, desc->len, &addr_type);
+	/* Need check extra VLAN_HLEN size for inserting VLAN tag */
+	phys_addr = gpa_to_hpa(vdev, desc->addr, desc->len + VLAN_HLEN,
+		&addr_type);
 
 	if (likely(packet_success < (free_entries - 1)))
 		/* Prefetch descriptor index. */
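
The extra VLAN_HLEN in the gpa_to_hpa() check above is subtle: the host re-inserts the 4-byte 802.1Q tag on the way out, so the frame the NIC will DMA is 4 bytes longer than the guest descriptor advertised. Worked numbers (illustrative only):

	/*
	 *   desc->len (tag stripped on RX)            = 1514
	 * + VLAN_HLEN re-inserted on TX               =    4
	 *   bytes that must be physically contiguous  = 1518
	 * Checking desc->len alone could accept a buffer whose translated
	 * region ends at 1514 bytes and DMA past it.
	 */
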
@@ -2336,16 +2309,6 @@ init_data_ll (void)
 	return 0;
 }
 
-/*
- * Set virtqueue flags so that we do not receive interrupts.
- */
-static void
-set_irq_status (struct virtio_net *dev)
-{
-	dev->virtqueue[VIRTIO_RXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
-	dev->virtqueue[VIRTIO_TXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
-}
-
 /*
  * Remove a device from the specific data core linked list and from the main linked list. Synchonization
  * occurs through the use of the lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
@@ -2631,7 +2594,7 @@ new_device (struct virtio_net *dev)
 	struct vhost_dev *vdev;
 	uint32_t regionidx;
 
-	vdev = rte_zmalloc("vhost device", sizeof(*vdev), CACHE_LINE_SIZE);
+	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
 	if (vdev == NULL) {
 		RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Couldn't allocate memory for vhost dev\n",
 			dev->device_fh);
@@ -2651,9 +2614,10 @@ new_device (struct virtio_net *dev)
 	}
 
-	vdev->regions_hpa = (struct virtio_memory_regions_hpa *) rte_zmalloc("vhost hpa region",
-		sizeof(struct virtio_memory_regions_hpa) * vdev->nregions_hpa,
-		CACHE_LINE_SIZE);
+	vdev->regions_hpa = rte_calloc("vhost hpa region",
+		vdev->nregions_hpa,
+		sizeof(struct virtio_memory_regions_hpa),
+		RTE_CACHE_LINE_SIZE);
 	if (vdev->regions_hpa == NULL) {
 		RTE_LOG(ERR, VHOST_CONFIG, "Cannot allocate memory for hpa region\n");
 		rte_free(vdev);
@@ -2689,7 +2653,7 @@ new_device (struct virtio_net *dev)
 	ll_dev->vdev = vdev;
 	add_data_ll_entry(&ll_root_used, ll_dev);
 	vdev->vmdq_rx_q
-		= dev->device_fh * (num_queues / num_devices);
+		= dev->device_fh * queues_per_pool + vmdq_queue_base;
 
 	if (zero_copy) {
 		uint32_t index = vdev->vmdq_rx_q;
@@ -2777,27 +2741,26 @@ new_device (struct virtio_net *dev)
 		}
 	}
 
 	/* Add device to lcore ll */
-	ll_dev->dev->coreid = core_add;
-	ll_dev = get_data_ll_free_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_free);
+	ll_dev = get_data_ll_free_entry(&lcore_info[core_add].lcore_ll->ll_root_free);
 	if (ll_dev == NULL) {
 		RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh);
 		vdev->ready = DEVICE_SAFE_REMOVE;
 		destroy_device(dev);
-		if (vdev->regions_hpa)
-			rte_free(vdev->regions_hpa);
+		rte_free(vdev->regions_hpa);
 		rte_free(vdev);
 		return -1;
 	}
 	ll_dev->vdev = vdev;
 	vdev->coreid = core_add;
-	add_data_ll_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_used, ll_dev);
+	add_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used, ll_dev);
 
 	/* Initialize device stats */
 	memset(&dev_statistics[dev->device_fh], 0, sizeof(struct device_statistics));
 
 	/* Disable notifications. */
-	set_irq_status(dev);
+	rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
+	rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);
 	lcore_info[vdev->coreid].lcore_ll->device_num++;
 	dev->flags |= VIRTIO_DEV_RUNNING;
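
The new vmdq_rx_q formula is the runtime counterpart of the queue accounting done in port_init(). Worked arithmetic with hypothetical dev_info values, not from the patch:

	/*
	 *   vmdq_queue_base = 64  (PF owns the first 64 queues)
	 *   queues_per_pool =  4  (vmdq_queue_num / max_vmdq_pools)
	 *   device_fh       =  2
	 *   vmdq_rx_q       = 2 * 4 + 64 = 72
	 * The old device_fh * (num_queues / num_devices) formula assumed
	 * the VMDq region began at queue 0 and would poll a PF queue here.
	 */
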
@@ -2880,12 +2843,8 @@ static void
 setup_mempool_tbl(int socket, uint32_t index, char *pool_name,
 	char *ring_name, uint32_t nb_mbuf)
 {
-	uint16_t roomsize = VIRTIO_DESCRIPTOR_LEN_ZCP + RTE_PKTMBUF_HEADROOM;
-	vpool_array[index].pool
-		= rte_mempool_create(pool_name, nb_mbuf, MBUF_SIZE_ZCP,
-		MBUF_CACHE_SIZE_ZCP, sizeof(struct rte_pktmbuf_pool_private),
-		rte_pktmbuf_pool_init, (void *)(uintptr_t)roomsize,
-		rte_pktmbuf_init, NULL, socket, 0);
+	vpool_array[index].pool = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
+		MBUF_CACHE_SIZE_ZCP, 0, MBUF_DATA_SIZE_ZCP, socket);
 	if (vpool_array[index].pool != NULL) {
 		vpool_array[index].ring = rte_ring_create(ring_name,
@@ -2906,7 +2865,7 @@ setup_mempool_tbl(int socket, uint32_t index, char *pool_name,
 		}
 
 		/* Need consider head room. */
-		vpool_array[index].buf_size = roomsize - RTE_PKTMBUF_HEADROOM;
+		vpool_array[index].buf_size = VIRTIO_DESCRIPTOR_LEN_ZCP;
 	} else {
 		rte_exit(EXIT_FAILURE, "mempool_create(%s) failed", pool_name);
 	}
@@ -2918,13 +2877,14 @@ setup_mempool_tbl(int socket, uint32_t index, char *pool_name,
  * device is also registered here to handle the IOCTLs.
  */
 int
-MAIN(int argc, char *argv[])
+main(int argc, char *argv[])
 {
 	struct rte_mempool *mbuf_pool = NULL;
 	unsigned lcore_id, core_id = 0;
 	unsigned nb_ports, valid_num_ports;
 	int ret;
-	uint8_t portid, queue_id = 0;
+	uint8_t portid;
+	uint16_t queue_id;
 	static pthread_t tid;
 
 	/* init EAL */
@@ -2968,15 +2928,9 @@ MAIN(int argc, char *argv[])
 
 	if (zero_copy == 0) {
 		/* Create the mbuf pool. */
-		mbuf_pool = rte_mempool_create(
-				"MBUF_POOL",
-				NUM_MBUFS_PER_PORT
-				* valid_num_ports,
-				MBUF_SIZE, MBUF_CACHE_SIZE,
-				sizeof(struct rte_pktmbuf_pool_private),
-				rte_pktmbuf_pool_init, NULL,
-				rte_pktmbuf_init, NULL,
-				rte_socket_id(), 0);
+		mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
+			NUM_MBUFS_PER_PORT * valid_num_ports, MBUF_CACHE_SIZE,
+			0, MBUF_DATA_SIZE, rte_socket_id());
 		if (mbuf_pool == NULL)
 			rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
@@ -2994,14 +2948,6 @@ MAIN(int argc, char *argv[])
 		char pool_name[RTE_MEMPOOL_NAMESIZE];
 		char ring_name[RTE_MEMPOOL_NAMESIZE];
 
-		/*
-		 * Zero copy defers queue RX/TX start to the time when guest
-		 * finishes its startup and packet buffers from that guest are
-		 * available.
-		 */
-		rx_conf_default.rx_deferred_start = (uint8_t)zero_copy;
-		rx_conf_default.rx_drop_en = 0;
-		tx_conf_default.tx_deferred_start = (uint8_t)zero_copy;
 		nb_mbuf = num_rx_descriptor
 			+ num_switching_cores * MBUF_CACHE_SIZE_ZCP
 			+ num_switching_cores * MAX_PKT_BURST;
@@ -3089,10 +3035,10 @@ MAIN(int argc, char *argv[])
 		}
 
 		LOG_DEBUG(VHOST_CONFIG,
-			"in MAIN: mbuf count in mempool at initial "
+			"in main: mbuf count in mempool at initial "
 			"is: %d\n", count_in_mempool);
 		LOG_DEBUG(VHOST_CONFIG,
-			"in MAIN: mbuf count in ring at initial is :"
+			"in main: mbuf count in ring at initial is :"
 			" %d\n",
 			rte_ring_count(vpool_array[index].ring));
 	}
@@ -3102,15 +3048,18 @@ MAIN(int argc, char *argv[])
 			lcore_id);
 	}
 
+	if (mergeable == 0)
+		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
+
 	/* Register CUSE device to handle IOCTLs.
*/ - ret = register_cuse_device((char*)&dev_basename, dev_index, get_virtio_net_callbacks()); + ret = rte_vhost_driver_register((char *)&dev_basename); if (ret != 0) rte_exit(EXIT_FAILURE,"CUSE device setup failure.\n"); - init_virtio_net(&virtio_net_device_ops); + rte_vhost_driver_callback_register(&virtio_net_device_ops); /* Start CUSE session. */ - start_cuse_session_loop(); + rte_vhost_driver_session_start(); return 0; }
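
The closing hunk is the heart of the patch: the example no longer links its own CUSE/virtio-net glue but drives the vhost library. A minimal standalone sketch of that lifecycle, assuming the rte_virtio_net.h API of this DPDK generation (the wrapper name is illustrative; new_device/destroy_device are the callbacks defined earlier in main.c):

	#include <rte_virtio_net.h>

	static const struct virtio_net_device_ops ops = {
		.new_device     = new_device,
		.destroy_device = destroy_device,
	};

	/* Register the character device, install the callbacks, then
	 * block in the library's session loop. */
	static int
	run_vhost(const char *basename)
	{
		if (rte_vhost_driver_register(basename) != 0)
			return -1;
		rte_vhost_driver_callback_register(&ops);
		rte_vhost_driver_session_start();	/* does not return */
		return 0;
	}
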