#include "main.h"
-#define MAX_QUEUES 256
+#define MAX_QUEUES 512
/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1
/*
* Calculate the number of buffers needed per port
*/
#define NUM_MBUFS_PER_PORT ((MAX_QUEUES*RTE_TEST_RX_DESC_DEFAULT) + \
(num_switching_cores*MAX_PKT_BURST) + \
(num_switching_cores*RTE_TEST_TX_DESC_DEFAULT) +\
(num_switching_cores*MBUF_CACHE_SIZE))
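/*
 * That is: one mbuf per RX descriptor on every queue, plus, for each
 * switching core, one packet burst, one TX ring's worth of in-flight
 * mbufs, and one mempool cache.
 */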
#define MBUF_CACHE_SIZE 128
-#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
/*
 * No frame data buffers allocated from the host are required for the zero
 * copy implementation; the guest allocates the frame data buffers, and
 * vhost uses them directly.
*/
-#define VIRTIO_DESCRIPTOR_LEN_ZCP 1518
-#define MBUF_SIZE_ZCP (VIRTIO_DESCRIPTOR_LEN_ZCP + sizeof(struct rte_mbuf) \
- + RTE_PKTMBUF_HEADROOM)
+#define VIRTIO_DESCRIPTOR_LEN_ZCP RTE_MBUF_DEFAULT_DATAROOM
+#define MBUF_DATA_SIZE_ZCP RTE_MBUF_DEFAULT_BUF_SIZE
#define MBUF_CACHE_SIZE_ZCP 0
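/*
 * Note: the zero-copy pools use no per-core cache, presumably so that
 * every mbuf stays visible on the pool ring for attach/detach against
 * guest buffers.
 */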
#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define BURST_RX_WAIT_US 15 /* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4 /* Number of retries on RX. */
#define JUMBO_FRAME_MAX_SIZE 0x2600
/* Number of descriptors per cacheline. */
#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
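/*
 * For example, with a 64-byte cache line and the 16-byte struct
 * vring_desc, DESC_PER_CACHELINE evaluates to 4.
 */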
+#define MBUF_EXT_MEM(mb) (rte_mbuf_from_indirect(mb) != (mb))
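+/*
+ * True when the mbuf's data buffer is not the one embedded right after
+ * its own header, i.e. the mbuf was attached to external (here,
+ * guest-provided) memory and must be detached before reuse.
+ */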
+
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;
static uint32_t zero_copy;
static int mergeable;
+/* Do vlan strip on host, enabled by default */
+static uint32_t vlan_strip = 1;
+
/* Number of descriptors to apply */
static uint32_t num_rx_descriptor = RTE_TEST_RX_DESC_DEFAULT_ZCP;
static uint32_t num_tx_descriptor = RTE_TEST_TX_DESC_DEFAULT_ZCP;
/*
 * The max pool number from dev_info will be used to validate the pool
 * number specified in the command line.
 */
rte_eth_dev_info_get(port, &dev_info);
+ if (dev_info.max_rx_queues > MAX_QUEUES) {
+ rte_exit(EXIT_FAILURE,
+ "please define MAX_QUEUES no less than %u in %s\n",
+ dev_info.max_rx_queues, __FILE__);
+ }
+
rxconf = &dev_info.default_rxconf;
txconf = &dev_info.default_txconf;
rxconf->rx_drop_en = 1;
+ /* Enable vlan offload */
+ txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;
+
/*
* Zero copy defers queue RX/TX start to the time when guest
* finishes its startup and packet buffers from that guest are
" --rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\n"
" --rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\n"
" --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
+ " --vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n"
" --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
" --dev-basename: The basename to be used for the character device.\n"
" --zero-copy [0|1]: disable(default)/enable rx/tx "
{"rx-retry-delay", required_argument, NULL, 0},
{"rx-retry-num", required_argument, NULL, 0},
{"mergeable", required_argument, NULL, 0},
+ {"vlan-strip", required_argument, NULL, 0},
{"stats", required_argument, NULL, 0},
{"dev-basename", required_argument, NULL, 0},
{"zero-copy", required_argument, NULL, 0},
}
}
+ /* Enable/disable RX VLAN strip on host. */
+ if (!strncmp(long_option[option_index].name,
+ "vlan-strip", MAX_LONG_OPT_SZ)) {
+ ret = parse_num_opt(optarg, 1);
+ if (ret == -1) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "Invalid argument for VLAN strip [0|1]\n");
+ us_vhost_usage(prgname);
+ return -1;
+ } else {
+ vlan_strip = !!ret;
+ vmdq_conf_default.rxmode.hw_vlan_strip =
+ vlan_strip;
+ }
+ }
+
/* Enable/disable stats. */
if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
ret = parse_num_opt(optarg, INT32_MAX);
return -1;
} else
zero_copy = ret;
-
- if (zero_copy) {
-#ifdef RTE_MBUF_REFCNT
- RTE_LOG(ERR, VHOST_CONFIG, "Before running "
- "zero copy vhost APP, please "
- "disable RTE_MBUF_REFCNT\n"
- "in config file and then rebuild DPDK "
- "core lib!\n"
- "Otherwise please disable zero copy "
- "flag in command line!\n");
- return -1;
-#endif
- }
}
/* Specify the descriptor number on RX. */
dev->device_fh);
/* Enable stripping of the vlan tag as we handle routing. */
- rte_eth_dev_set_vlan_strip_on_queue(ports[0], (uint16_t)vdev->vmdq_rx_q, 1);
+ if (vlan_strip)
+ rte_eth_dev_set_vlan_strip_on_queue(ports[0],
+ (uint16_t)vdev->vmdq_rx_q, 1);
/* Set device as ready for RX. */
vdev->ready = DEVICE_RX;
"(%"PRIu64") TX: pkt to local VM device id:"
"(%"PRIu64") vlan tag: %d.\n",
dev->device_fh, dev_ll->vdev->dev->device_fh,
- vlan_tag);
+ (int)*vlan_tag);
break;
}
unsigned len, ret, offset = 0;
const uint16_t lcore_id = rte_lcore_id();
struct virtio_net *dev = vdev->dev;
+ struct ether_hdr *nh;
/*check if destination is local VM*/
if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
return;
}
- if (vm2vm_mode == VM2VM_HARDWARE) {
- if (find_local_dest(dev, m, &offset, &vlan_tag) != 0 ||
- offset > rte_pktmbuf_tailroom(m)) {
+ if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
+ if (unlikely(find_local_dest(dev, m, &offset, &vlan_tag) != 0)) {
rte_pktmbuf_free(m);
return;
}
tx_q = &lcore_tx_queue[lcore_id];
len = tx_q->len;
-	m->ol_flags = PKT_TX_VLAN_PKT;
-	m->data_len += offset;
-	m->pkt_len += offset;
-	m->vlan_tci = vlan_tag;
+	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
+	if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
+		/* Guest has inserted the vlan tag. */
+		struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
+		uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
+		if ((vm2vm_mode == VM2VM_HARDWARE) &&
+			(vh->vlan_tci != vlan_tag_be))
+			vh->vlan_tci = vlan_tag_be;
+	} else {
+		m->ol_flags = PKT_TX_VLAN_PKT;
+
+		/*
+		 * Find the right segment to adjust data_len when the offset
+		 * is bigger than the tail room size.
+		 */
+		if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
+			if (likely(offset <= rte_pktmbuf_tailroom(m)))
+				m->data_len += offset;
+			else {
+				struct rte_mbuf *seg = m;
+
+				while ((seg->next != NULL) &&
+					(offset > rte_pktmbuf_tailroom(seg)))
+					seg = seg->next;
+
+				seg->data_len += offset;
+			}
+			m->pkt_len += offset;
+		}
+
+		m->vlan_tci = vlan_tag;
+	}
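+	/*
+	 * Note on the adjustment above: pkt_len is the total length of the
+	 * whole chain and lives in the head mbuf, while data_len is per
+	 * segment, so offset is added once to the head's pkt_len but to
+	 * whichever segment still has the tail room.
+	 */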
tx_q->m_table[len] = m;
len++;
/* If this is the first received packet we need to learn the MAC and setup VMDQ */
if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) {
if (vdev->remove || (link_vmdq(vdev, pkts_burst[0]) == -1)) {
- while (tx_count--)
- rte_pktmbuf_free(pkts_burst[tx_count]);
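+			/*
+			 * tx_count must end at exactly 0 here: it is reused
+			 * by the while (tx_count) drain loop below, so it
+			 * must not be left wrapped around the way an unsigned
+			 * post-decrement would leave it.
+			 */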
+ while (tx_count)
+ rte_pktmbuf_free(pkts_burst[--tx_count]);
}
}
while (tx_count)
/* Kick the guest if necessary. */
if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
- eventfd_write((int)vq->kickfd, 1);
+ eventfd_write(vq->callfd, (eventfd_t)1);
}
/*
static inline void pktmbuf_detach_zcp(struct rte_mbuf *m)
{
const struct rte_mempool *mp = m->pool;
- void *buf = RTE_MBUF_TO_BADDR(m);
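+	/* rte_mbuf_to_baddr() returns the address of the data buffer embedded
+	 * right after the mbuf header, which the detach restores. */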
+ void *buf = rte_mbuf_to_baddr(m);
uint32_t buf_ofs;
uint32_t buf_len = mp->elt_size - sizeof(*m);
m->buf_physaddr = rte_mempool_virt2phy(mp, m) + sizeof(*m);
for (index = 0; index < mbuf_count; index++) {
mbuf = __rte_mbuf_raw_alloc(vpool->pool);
- if (likely(RTE_MBUF_INDIRECT(mbuf)))
+ if (likely(MBUF_EXT_MEM(mbuf)))
pktmbuf_detach_zcp(mbuf);
rte_ring_sp_enqueue(vpool->ring, mbuf);
/* Kick guest if required. */
if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
- eventfd_write((int)vq->kickfd, 1);
+ eventfd_write(vq->callfd, (eventfd_t)1);
return 0;
}
for (index = 0; index < mbuf_count; index++) {
mbuf = __rte_mbuf_raw_alloc(vpool->pool);
if (likely(mbuf != NULL)) {
- if (likely(RTE_MBUF_INDIRECT(mbuf)))
+ if (likely(MBUF_EXT_MEM(mbuf)))
pktmbuf_detach_zcp(mbuf);
rte_ring_sp_enqueue(vpool->ring, (void *)mbuf);
}
/* Kick the guest if necessary. */
if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
- eventfd_write((int)vq->kickfd, 1);
+ eventfd_write(vq->callfd, (eventfd_t)1);
return count;
}
}
- vdev->regions_hpa = (struct virtio_memory_regions_hpa *) rte_zmalloc("vhost hpa region",
- sizeof(struct virtio_memory_regions_hpa) * vdev->nregions_hpa,
- RTE_CACHE_LINE_SIZE);
+ vdev->regions_hpa = rte_calloc("vhost hpa region",
+ vdev->nregions_hpa,
+ sizeof(struct virtio_memory_regions_hpa),
+ RTE_CACHE_LINE_SIZE);
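+	/* rte_calloc() zeroes the allocation, so the hpa regions start out
+	 * cleared exactly as they did with rte_zmalloc(). */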
if (vdev->regions_hpa == NULL) {
RTE_LOG(ERR, VHOST_CONFIG, "Cannot allocate memory for hpa region\n");
rte_free(vdev);
RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh);
vdev->ready = DEVICE_SAFE_REMOVE;
destroy_device(dev);
- if (vdev->regions_hpa)
- rte_free(vdev->regions_hpa);
+ rte_free(vdev->regions_hpa);
rte_free(vdev);
return -1;
}
setup_mempool_tbl(int socket, uint32_t index, char *pool_name,
char *ring_name, uint32_t nb_mbuf)
{
- uint16_t roomsize = VIRTIO_DESCRIPTOR_LEN_ZCP + RTE_PKTMBUF_HEADROOM;
- vpool_array[index].pool
- = rte_mempool_create(pool_name, nb_mbuf, MBUF_SIZE_ZCP,
- MBUF_CACHE_SIZE_ZCP, sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, (void *)(uintptr_t)roomsize,
- rte_pktmbuf_init, NULL, socket, 0);
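+	/*
+	 * rte_pktmbuf_pool_create() takes name, element count, per-core cache
+	 * size, private area size (0 here), data room size, and socket.
+	 * MBUF_DATA_SIZE_ZCP (RTE_MBUF_DEFAULT_BUF_SIZE) already includes
+	 * RTE_PKTMBUF_HEADROOM, so the explicit roomsize computation is gone.
+	 */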
+ vpool_array[index].pool = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
+ MBUF_CACHE_SIZE_ZCP, 0, MBUF_DATA_SIZE_ZCP, socket);
if (vpool_array[index].pool != NULL) {
vpool_array[index].ring
= rte_ring_create(ring_name,
}
/* Need to consider the headroom. */
- vpool_array[index].buf_size = roomsize - RTE_PKTMBUF_HEADROOM;
+ vpool_array[index].buf_size = VIRTIO_DESCRIPTOR_LEN_ZCP;
} else {
rte_exit(EXIT_FAILURE, "mempool_create(%s) failed", pool_name);
}
}
+/* When we receive a SIGINT signal, unregister the vhost driver. */
+static void
+sigint_handler(__rte_unused int signum)
+{
+ /* Unregister vhost driver. */
+ int ret = rte_vhost_driver_unregister((char *)&dev_basename);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE, "vhost driver unregister failure.\n");
+ exit(0);
+}
/*
* Main function, does initialisation and calls the per-lcore functions. The CUSE
uint8_t portid;
uint16_t queue_id;
static pthread_t tid;
+ char thread_name[RTE_MAX_THREAD_NAME_LEN];
+
+ signal(SIGINT, sigint_handler);
/* init EAL */
ret = rte_eal_init(argc, argv);
if (zero_copy == 0) {
/* Create the mbuf pool. */
- mbuf_pool = rte_mempool_create(
- "MBUF_POOL",
- NUM_MBUFS_PER_PORT
- * valid_num_ports,
- MBUF_SIZE, MBUF_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL,
- rte_socket_id(), 0);
+ mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
+ NUM_MBUFS_PER_PORT * valid_num_ports, MBUF_CACHE_SIZE,
+ 0, MBUF_DATA_SIZE, rte_socket_id());
if (mbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
memset(&dev_statistics, 0, sizeof(dev_statistics));
/* Enable stats if the user option is set. */
- if (enable_stats)
- pthread_create(&tid, NULL, (void*)print_stats, NULL );
+ if (enable_stats) {
+ ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot create print-stats thread\n");
+
+ /* Set thread_name for aid in debugging. */
+ snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
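+		/* pthread_setname_np() is a non-portable GNU extension;
+		 * a failure to set the name is logged but not fatal. */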
+ ret = pthread_setname_np(tid, thread_name);
+ if (ret != 0)
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Cannot set print-stats name\n");
+ }
/* Launch all data cores. */
if (zero_copy == 0) {
if (mergeable == 0)
rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
- /* Register CUSE device to handle IOCTLs. */
+ /* Register vhost(cuse or user) driver to handle vhost messages. */
ret = rte_vhost_driver_register((char *)&dev_basename);
if (ret != 0)
- rte_exit(EXIT_FAILURE,"CUSE device setup failure.\n");
+ rte_exit(EXIT_FAILURE, "vhost driver register failure.\n");
rte_vhost_driver_callback_register(&virtio_net_device_ops);
return 0;
}
-