diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index a93f7a01d7..22d6a4b92a 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -53,7 +53,7 @@
#include "main.h"
-#define MAX_QUEUES 128
+#define MAX_QUEUES 512
/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1
@@ -67,7 +67,7 @@
(num_switching_cores*MBUF_CACHE_SIZE))
#define MBUF_CACHE_SIZE 128
-#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATA_SIZE (2048 + RTE_PKTMBUF_HEADROOM)
/*
* No frame data buffer allocated from host are required for zero copy
@@ -75,29 +75,9 @@
* directly use it.
*/
#define VIRTIO_DESCRIPTOR_LEN_ZCP 1518
-#define MBUF_SIZE_ZCP (VIRTIO_DESCRIPTOR_LEN_ZCP + sizeof(struct rte_mbuf) \
- + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATA_SIZE_ZCP (VIRTIO_DESCRIPTOR_LEN_ZCP + RTE_PKTMBUF_HEADROOM)
#define MBUF_CACHE_SIZE_ZCP 0
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /* Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /* Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /* Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /* Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /* Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /* Default values of TX write-back threshold reg. */
-
#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
@@ -156,11 +136,16 @@
#define MAC_ADDR_CMP 0xFFFFFFFFFFFFULL
/* Number of descriptors per cacheline. */
-#define DESC_PER_CACHELINE (CACHE_LINE_SIZE / sizeof(struct vring_desc))
+#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
+
+#define MBUF_EXT_MEM(mb) (RTE_MBUF_FROM_BADDR((mb)->buf_addr) != (mb))
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;
+/* Promiscuous mode */
+static uint32_t promiscuous;
+
/*Number of switching cores enabled*/
static uint32_t num_switching_cores = 0;
@@ -175,6 +160,9 @@ static uint32_t num_devices;
static uint32_t zero_copy;
static int mergeable;
+/* Do vlan strip on host, enabled on default */
+static uint32_t vlan_strip = 1;
+
/* number of descriptors to apply*/
static uint32_t num_rx_descriptor = RTE_TEST_RX_DESC_DEFAULT_ZCP;
static uint32_t num_tx_descriptor = RTE_TEST_TX_DESC_DEFAULT_ZCP;
@@ -217,32 +205,6 @@ static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
/* Character device basename. Can be set by user. */
static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";
-
-/* Default configuration for rx and tx thresholds etc. 
*/ -static struct rte_eth_rxconf rx_conf_default = { - .rx_thresh = { - .pthresh = RX_PTHRESH, - .hthresh = RX_HTHRESH, - .wthresh = RX_WTHRESH, - }, - .rx_drop_en = 1, -}; - -/* - * These default values are optimized for use with the Intel(R) 82599 10 GbE - * Controller and the DPDK ixgbe/igb PMD. Consider using other values for other - * network controllers and/or network drivers. - */ -static struct rte_eth_txconf tx_conf_default = { - .tx_thresh = { - .pthresh = TX_PTHRESH, - .hthresh = TX_HTHRESH, - .wthresh = TX_WTHRESH, - }, - .tx_free_thresh = 0, /* Use PMD default values */ - .tx_rs_thresh = 0, /* Use PMD default values */ -}; - /* empty vmdq configuration structure. Filled in programatically */ static struct rte_eth_conf vmdq_conf_default = { .rxmode = { @@ -282,6 +244,9 @@ static struct rte_eth_conf vmdq_conf_default = { static unsigned lcore_ids[RTE_MAX_LCORE]; static uint8_t ports[RTE_MAX_ETHPORTS]; static unsigned num_ports = 0; /**< The number of ports specified in command line */ +static uint16_t num_pf_queues, num_vmdq_queues; +static uint16_t vmdq_pool_base, vmdq_queue_base; +static uint16_t queues_per_pool; static const uint16_t external_pkt_default_vlan_tag = 2000; const uint16_t vlan_tags[] = { @@ -364,13 +329,15 @@ static inline int get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices) { struct rte_eth_vmdq_rx_conf conf; + struct rte_eth_vmdq_rx_conf *def_conf = + &vmdq_conf_default.rx_adv_conf.vmdq_rx_conf; unsigned i; memset(&conf, 0, sizeof(conf)); conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices; conf.nb_pool_maps = num_devices; - conf.enable_loop_back = - vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back; + conf.enable_loop_back = def_conf->enable_loop_back; + conf.rx_mode = def_conf->rx_mode; for (i = 0; i < conf.nb_pool_maps; i++) { conf.pool_map[i].vlan_id = vlan_tags[ i ]; @@ -407,7 +374,9 @@ port_init(uint8_t port) { struct rte_eth_dev_info dev_info; struct rte_eth_conf port_conf; - uint16_t rx_rings, tx_rings; + struct rte_eth_rxconf *rxconf; + struct rte_eth_txconf *txconf; + int16_t rx_rings, tx_rings; uint16_t rx_ring_size, tx_ring_size; int retval; uint16_t q; @@ -415,9 +384,32 @@ port_init(uint8_t port) /* The max pool number from dev_info will be used to validate the pool number specified in cmd line */ rte_eth_dev_info_get (port, &dev_info); + if (dev_info.max_rx_queues > MAX_QUEUES) { + rte_exit(EXIT_FAILURE, + "please define MAX_QUEUES no less than %u in %s\n", + dev_info.max_rx_queues, __FILE__); + } + + rxconf = &dev_info.default_rxconf; + txconf = &dev_info.default_txconf; + rxconf->rx_drop_en = 1; + + /* Enable vlan offload */ + txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL; + + /* + * Zero copy defers queue RX/TX start to the time when guest + * finishes its startup and packet buffers from that guest are + * available. + */ + if (zero_copy) { + rxconf->rx_deferred_start = 1; + rxconf->rx_drop_en = 0; + txconf->tx_deferred_start = 1; + } + /*configure the number of supported virtio devices based on VMDQ limits */ num_devices = dev_info.max_vmdq_pools; - num_queues = dev_info.max_rx_queues; if (zero_copy) { rx_ring_size = num_rx_descriptor; @@ -437,10 +429,19 @@ port_init(uint8_t port) retval = get_eth_conf(&port_conf, num_devices); if (retval < 0) return retval; + /* NIC queues are divided into pf queues and vmdq queues. 
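+	 * queues_per_pool consecutive RX queues are assigned to each vmdq
+	 * pool, starting at vmdq_queue_base; a device's RX queue is later
+	 * derived as device_fh * queues_per_pool + vmdq_queue_base.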
*/ + num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num; + queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools; + num_vmdq_queues = num_devices * queues_per_pool; + num_queues = num_pf_queues + num_vmdq_queues; + vmdq_queue_base = dev_info.vmdq_queue_base; + vmdq_pool_base = dev_info.vmdq_pool_base; + printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n", + num_pf_queues, num_devices, queues_per_pool); if (port >= rte_eth_dev_count()) return -1; - rx_rings = (uint16_t)num_queues, + rx_rings = (uint16_t)dev_info.max_rx_queues; /* Configure ethernet device. */ retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf); if (retval != 0) @@ -449,14 +450,16 @@ port_init(uint8_t port) /* Setup the queues. */ for (q = 0; q < rx_rings; q ++) { retval = rte_eth_rx_queue_setup(port, q, rx_ring_size, - rte_eth_dev_socket_id(port), &rx_conf_default, + rte_eth_dev_socket_id(port), + rxconf, vpool_array[q].pool); if (retval < 0) return retval; } for (q = 0; q < tx_rings; q ++) { retval = rte_eth_tx_queue_setup(port, q, tx_ring_size, - rte_eth_dev_socket_id(port), &tx_conf_default); + rte_eth_dev_socket_id(port), + txconf); if (retval < 0) return retval; } @@ -468,6 +471,9 @@ port_init(uint8_t port) return retval; } + if (promiscuous) + rte_eth_promiscuous_enable(port); + rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]); RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices); RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8 @@ -562,6 +568,7 @@ us_vhost_usage(const char *prgname) " --rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\n" " --rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\n" " --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n" + " --vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n" " --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n" " --dev-basename: The basename to be used for the character device.\n" " --zero-copy [0|1]: disable(default)/enable rx/tx " @@ -589,6 +596,7 @@ us_vhost_parse_args(int argc, char **argv) {"rx-retry-delay", required_argument, NULL, 0}, {"rx-retry-num", required_argument, NULL, 0}, {"mergeable", required_argument, NULL, 0}, + {"vlan-strip", required_argument, NULL, 0}, {"stats", required_argument, NULL, 0}, {"dev-basename", required_argument, NULL, 0}, {"zero-copy", required_argument, NULL, 0}, @@ -598,7 +606,8 @@ us_vhost_parse_args(int argc, char **argv) }; /* Parse command line */ - while ((opt = getopt_long(argc, argv, "p:",long_option, &option_index)) != EOF) { + while ((opt = getopt_long(argc, argv, "p:P", + long_option, &option_index)) != EOF) { switch (opt) { /* Portmask */ case 'p': @@ -610,6 +619,15 @@ us_vhost_parse_args(int argc, char **argv) } break; + case 'P': + promiscuous = 1; + vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode = + ETH_VMDQ_ACCEPT_BROADCAST | + ETH_VMDQ_ACCEPT_MULTICAST; + rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX); + + break; + case 0: /* Enable/disable vm2vm comms. */ if (!strncmp(long_option[option_index].name, "vm2vm", @@ -679,6 +697,22 @@ us_vhost_parse_args(int argc, char **argv) } } + /* Enable/disable RX VLAN strip on host. 
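+			 * --vlan-strip 0 leaves the tag in the frame so the
+			 * guest receives it; --vlan-strip 1 (the default)
+			 * has the NIC strip it on RX.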
*/ + if (!strncmp(long_option[option_index].name, + "vlan-strip", MAX_LONG_OPT_SZ)) { + ret = parse_num_opt(optarg, 1); + if (ret == -1) { + RTE_LOG(INFO, VHOST_CONFIG, + "Invalid argument for VLAN strip [0|1]\n"); + us_vhost_usage(prgname); + return -1; + } else { + vlan_strip = !!ret; + vmdq_conf_default.rxmode.hw_vlan_strip = + vlan_strip; + } + } + /* Enable/disable stats. */ if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) { ret = parse_num_opt(optarg, INT32_MAX); @@ -712,19 +746,6 @@ us_vhost_parse_args(int argc, char **argv) return -1; } else zero_copy = ret; - - if (zero_copy) { -#ifdef RTE_MBUF_REFCNT - RTE_LOG(ERR, VHOST_CONFIG, "Before running " - "zero copy vhost APP, please " - "disable RTE_MBUF_REFCNT\n" - "in config file and then rebuild DPDK " - "core lib!\n" - "Otherwise please disable zero copy " - "flag in command line!\n"); - return -1; -#endif - } } /* Specify the descriptor number on RX. */ @@ -931,13 +952,16 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m) vdev->vlan_tag); /* Register the MAC address. */ - ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address, (uint32_t)dev->device_fh); + ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address, + (uint32_t)dev->device_fh + vmdq_pool_base); if (ret) RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n", dev->device_fh); /* Enable stripping of the vlan tag as we handle routing. */ - rte_eth_dev_set_vlan_strip_on_queue(ports[0], (uint16_t)vdev->vmdq_rx_q, 1); + if (vlan_strip) + rte_eth_dev_set_vlan_strip_on_queue(ports[0], + (uint16_t)vdev->vmdq_rx_q, 1); /* Set device as ready for RX. */ vdev->ready = DEVICE_RX; @@ -1102,6 +1126,7 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag) unsigned len, ret, offset = 0; const uint16_t lcore_id = rte_lcore_id(); struct virtio_net *dev = vdev->dev; + struct ether_hdr *nh; /*check if destination is local VM*/ if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) { @@ -1109,9 +1134,8 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag) return; } - if (vm2vm_mode == VM2VM_HARDWARE) { - if (find_local_dest(dev, m, &offset, &vlan_tag) != 0 || - offset > rte_pktmbuf_tailroom(m)) { + if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) { + if (unlikely(find_local_dest(dev, m, &offset, &vlan_tag) != 0)) { rte_pktmbuf_free(m); return; } @@ -1123,12 +1147,38 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag) tx_q = &lcore_tx_queue[lcore_id]; len = tx_q->len; - m->ol_flags = PKT_TX_VLAN_PKT; + nh = rte_pktmbuf_mtod(m, struct ether_hdr *); + if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) { + /* Guest has inserted the vlan tag. */ + struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1); + uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag); + if ((vm2vm_mode == VM2VM_HARDWARE) && + (vh->vlan_tci != vlan_tag_be)) + vh->vlan_tci = vlan_tag_be; + } else { + m->ol_flags = PKT_TX_VLAN_PKT; - m->data_len += offset; - m->pkt_len += offset; + /* + * Find the right seg to adjust the data len when offset is + * bigger than tail room size. 
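+		 * Walk the chain until a segment with enough tail room
+		 * is found, grow that segment's data_len, and adjust
+		 * pkt_len once on the head mbuf.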
+ */ + if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) { + if (likely(offset <= rte_pktmbuf_tailroom(m))) + m->data_len += offset; + else { + struct rte_mbuf *seg = m; + + while ((seg->next != NULL) && + (offset > rte_pktmbuf_tailroom(seg))) + seg = seg->next; + + seg->data_len += offset; + } + m->pkt_len += offset; + } - m->vlan_tci = vlan_tag; + m->vlan_tci = vlan_tag; + } tx_q->m_table[len] = m; len++; @@ -1280,8 +1330,8 @@ switch_worker(__attribute__((unused)) void *arg) /* If this is the first received packet we need to learn the MAC and setup VMDQ */ if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) { if (vdev->remove || (link_vmdq(vdev, pkts_burst[0]) == -1)) { - while (tx_count--) - rte_pktmbuf_free(pkts_burst[tx_count]); + while (tx_count) + rte_pktmbuf_free(pkts_burst[--tx_count]); } } while (tx_count) @@ -1383,7 +1433,7 @@ put_desc_to_used_list_zcp(struct vhost_virtqueue *vq, uint16_t desc_idx) /* Kick the guest if necessary. */ if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) - eventfd_write((int)vq->kickfd, 1); + eventfd_write((int)vq->callfd, 1); } /* @@ -1539,7 +1589,7 @@ txmbuf_clean_zcp(struct virtio_net *dev, struct vpool *vpool) for (index = 0; index < mbuf_count; index++) { mbuf = __rte_mbuf_raw_alloc(vpool->pool); - if (likely(RTE_MBUF_INDIRECT(mbuf))) + if (likely(MBUF_EXT_MEM(mbuf))) pktmbuf_detach_zcp(mbuf); rte_ring_sp_enqueue(vpool->ring, mbuf); @@ -1576,7 +1626,7 @@ txmbuf_clean_zcp(struct virtio_net *dev, struct vpool *vpool) /* Kick guest if required. */ if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) - eventfd_write((int)vq->kickfd, 1); + eventfd_write((int)vq->callfd, 1); return 0; } @@ -1602,7 +1652,7 @@ static void mbuf_destroy_zcp(struct vpool *vpool) for (index = 0; index < mbuf_count; index++) { mbuf = __rte_mbuf_raw_alloc(vpool->pool); if (likely(mbuf != NULL)) { - if (likely(RTE_MBUF_INDIRECT(mbuf))) + if (likely(MBUF_EXT_MEM(mbuf))) pktmbuf_detach_zcp(mbuf); rte_ring_sp_enqueue(vpool->ring, (void *)mbuf); } @@ -1724,7 +1774,7 @@ virtio_dev_rx_zcp(struct virtio_net *dev, struct rte_mbuf **pkts, /* Kick the guest if necessary. 
*/ if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) - eventfd_write((int)vq->kickfd, 1); + eventfd_write((int)vq->callfd, 1); return count; } @@ -2544,7 +2594,7 @@ new_device (struct virtio_net *dev) struct vhost_dev *vdev; uint32_t regionidx; - vdev = rte_zmalloc("vhost device", sizeof(*vdev), CACHE_LINE_SIZE); + vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE); if (vdev == NULL) { RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Couldn't allocate memory for vhost dev\n", dev->device_fh); @@ -2564,9 +2614,10 @@ new_device (struct virtio_net *dev) } - vdev->regions_hpa = (struct virtio_memory_regions_hpa *) rte_zmalloc("vhost hpa region", - sizeof(struct virtio_memory_regions_hpa) * vdev->nregions_hpa, - CACHE_LINE_SIZE); + vdev->regions_hpa = rte_calloc("vhost hpa region", + vdev->nregions_hpa, + sizeof(struct virtio_memory_regions_hpa), + RTE_CACHE_LINE_SIZE); if (vdev->regions_hpa == NULL) { RTE_LOG(ERR, VHOST_CONFIG, "Cannot allocate memory for hpa region\n"); rte_free(vdev); @@ -2602,7 +2653,7 @@ new_device (struct virtio_net *dev) ll_dev->vdev = vdev; add_data_ll_entry(&ll_root_used, ll_dev); vdev->vmdq_rx_q - = dev->device_fh * (num_queues / num_devices); + = dev->device_fh * queues_per_pool + vmdq_queue_base; if (zero_copy) { uint32_t index = vdev->vmdq_rx_q; @@ -2695,8 +2746,7 @@ new_device (struct virtio_net *dev) RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh); vdev->ready = DEVICE_SAFE_REMOVE; destroy_device(dev); - if (vdev->regions_hpa) - rte_free(vdev->regions_hpa); + rte_free(vdev->regions_hpa); rte_free(vdev); return -1; } @@ -2793,12 +2843,8 @@ static void setup_mempool_tbl(int socket, uint32_t index, char *pool_name, char *ring_name, uint32_t nb_mbuf) { - uint16_t roomsize = VIRTIO_DESCRIPTOR_LEN_ZCP + RTE_PKTMBUF_HEADROOM; - vpool_array[index].pool - = rte_mempool_create(pool_name, nb_mbuf, MBUF_SIZE_ZCP, - MBUF_CACHE_SIZE_ZCP, sizeof(struct rte_pktmbuf_pool_private), - rte_pktmbuf_pool_init, (void *)(uintptr_t)roomsize, - rte_pktmbuf_init, NULL, socket, 0); + vpool_array[index].pool = rte_pktmbuf_pool_create(pool_name, nb_mbuf, + MBUF_CACHE_SIZE_ZCP, 0, MBUF_DATA_SIZE_ZCP, socket); if (vpool_array[index].pool != NULL) { vpool_array[index].ring = rte_ring_create(ring_name, @@ -2819,7 +2865,7 @@ setup_mempool_tbl(int socket, uint32_t index, char *pool_name, } /* Need consider head room. */ - vpool_array[index].buf_size = roomsize - RTE_PKTMBUF_HEADROOM; + vpool_array[index].buf_size = VIRTIO_DESCRIPTOR_LEN_ZCP; } else { rte_exit(EXIT_FAILURE, "mempool_create(%s) failed", pool_name); } @@ -2831,13 +2877,14 @@ setup_mempool_tbl(int socket, uint32_t index, char *pool_name, * device is also registered here to handle the IOCTLs. */ int -MAIN(int argc, char *argv[]) +main(int argc, char *argv[]) { struct rte_mempool *mbuf_pool = NULL; unsigned lcore_id, core_id = 0; unsigned nb_ports, valid_num_ports; int ret; - uint8_t portid, queue_id = 0; + uint8_t portid; + uint16_t queue_id; static pthread_t tid; /* init EAL */ @@ -2881,15 +2928,9 @@ MAIN(int argc, char *argv[]) if (zero_copy == 0) { /* Create the mbuf pool. 
*/ - mbuf_pool = rte_mempool_create( - "MBUF_POOL", - NUM_MBUFS_PER_PORT - * valid_num_ports, - MBUF_SIZE, MBUF_CACHE_SIZE, - sizeof(struct rte_pktmbuf_pool_private), - rte_pktmbuf_pool_init, NULL, - rte_pktmbuf_init, NULL, - rte_socket_id(), 0); + mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", + NUM_MBUFS_PER_PORT * valid_num_ports, MBUF_CACHE_SIZE, + 0, MBUF_DATA_SIZE, rte_socket_id()); if (mbuf_pool == NULL) rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n"); @@ -2907,14 +2948,6 @@ MAIN(int argc, char *argv[]) char pool_name[RTE_MEMPOOL_NAMESIZE]; char ring_name[RTE_MEMPOOL_NAMESIZE]; - /* - * Zero copy defers queue RX/TX start to the time when guest - * finishes its startup and packet buffers from that guest are - * available. - */ - rx_conf_default.rx_deferred_start = (uint8_t)zero_copy; - rx_conf_default.rx_drop_en = 0; - tx_conf_default.tx_deferred_start = (uint8_t)zero_copy; nb_mbuf = num_rx_descriptor + num_switching_cores * MBUF_CACHE_SIZE_ZCP + num_switching_cores * MAX_PKT_BURST; @@ -3002,10 +3035,10 @@ MAIN(int argc, char *argv[]) } LOG_DEBUG(VHOST_CONFIG, - "in MAIN: mbuf count in mempool at initial " + "in main: mbuf count in mempool at initial " "is: %d\n", count_in_mempool); LOG_DEBUG(VHOST_CONFIG, - "in MAIN: mbuf count in ring at initial is :" + "in main: mbuf count in ring at initial is :" " %d\n", rte_ring_count(vpool_array[index].ring)); }
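
Note on the mempool conversion in this patch: rte_pktmbuf_pool_create() wraps
the former rte_mempool_create() boilerplate (it computes the element size and
supplies the rte_pktmbuf_pool_init/rte_pktmbuf_init callbacks itself) and
takes the mbuf data room size directly, which is why MBUF_SIZE, which had to
add sizeof(struct rte_mbuf), becomes MBUF_DATA_SIZE. A minimal sketch of the
new call pattern follows; the pool name, mbuf count, and error handling are
illustrative assumptions, not part of the patch:

#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_mbuf.h>

#define MBUF_DATA_SIZE (2048 + RTE_PKTMBUF_HEADROOM)
#define MBUF_CACHE_SIZE 128

static struct rte_mempool *
create_mbuf_pool(void)
{
	/* name, mbuf count, per-lcore cache, priv size, data room, socket */
	struct rte_mempool *mp = rte_pktmbuf_pool_create("MBUF_POOL",
			8192, MBUF_CACHE_SIZE, 0, MBUF_DATA_SIZE,
			rte_socket_id());

	if (mp == NULL)
		RTE_LOG(ERR, USER1, "mbuf pool creation failed: %s\n",
			rte_strerror(rte_errno));
	return mp;
}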