#include <sys/types.h>
#include <sys/stat.h>
+#include <rte_eal_memconfig.h>
+
#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"
+#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"
+
static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
state.index = queue_sel;
state.num = 0; /* no reservation */
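+ /* For a packed ring, bits [0:14] of the vring base carry the initial
+ * avail index and bit 15 carries the avail wrap counter.
+ */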
+ if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
+ state.num |= (1 << 15);
dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state);
dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr);
int
virtio_user_start_device(struct virtio_user_dev *dev)
{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
uint64_t features;
int ret;
- if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
- return -1;
+ /*
+ * XXX workaround!
+ *
+ * We need to make sure that the locks will be
+ * taken in the correct order to avoid deadlocks:
+ * the memory event callback runs with the hotplug
+ * lock held and then takes dev->mutex, so this
+ * thread must take the hotplug lock first, too.
+ *
+ * Before releasing this lock, this thread should
+ * not trigger any memory hotplug events.
+ *
+ * This is a temporary workaround, and should be
+ * replaced when we get proper support from the
+ * memory subsystem in the future.
+ */
+ rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ pthread_mutex_lock(&dev->mutex);
- /* Do not check return as already done in init, or reset in stop */
- dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL);
+ if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
+ goto error;
/* Step 0: tell vhost to create queues */
if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
goto error;
/* Step 4: enable queues
* we enable the 1st queue pair by default.
*/
dev->ops->enable_qp(dev, 0, 1);
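+ /* Record that the device is running so that the memory event
+ * callback knows whether the queues need to be refreshed.
+ */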
+ dev->started = true;
+ pthread_mutex_unlock(&dev->mutex);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+
return 0;
error:
+ pthread_mutex_unlock(&dev->mutex);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
/* TODO: free resource here or caller to check */
return -1;
}
int
virtio_user_stop_device(struct virtio_user_dev *dev)
{
+ struct vhost_vring_state state;
uint32_t i;
+ int error = 0;
+
+ pthread_mutex_lock(&dev->mutex);
+ if (!dev->started)
+ goto out;
for (i = 0; i < dev->max_queue_pairs; ++i)
dev->ops->enable_qp(dev, i, 0);
- if (dev->ops->send_request(dev, VHOST_USER_RESET_OWNER, NULL) < 0) {
- PMD_DRV_LOG(INFO, "Failed to reset the device\n");
- return -1;
+ /* Stop the backend: a vhost-user backend must stop the ring when it
+ * receives GET_VRING_BASE, so the request doubles as the per-queue
+ * stop command (the RESET_OWNER request is deprecated).
+ */
+ for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
+ state.index = i;
+ if (dev->ops->send_request(dev, VHOST_USER_GET_VRING_BASE,
+ &state) < 0) {
+ PMD_DRV_LOG(ERR, "get_vring_base failed, index=%u",
+ i);
+ error = -1;
+ goto out;
+ }
}
- return 0;
+ dev->started = false;
+out:
+ pthread_mutex_unlock(&dev->mutex);
+
+ return error;
}
return 0;
}
+static void
+virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
+ const void *addr,
+ size_t len __rte_unused,
+ void *arg)
+{
+ struct virtio_user_dev *dev = arg;
+ struct rte_memseg_list *msl;
+ uint16_t i;
+
+ /* ignore externally allocated memory */
+ msl = rte_mem_virt2memseg_list(addr);
+ if (msl->external)
+ return;
+
+ pthread_mutex_lock(&dev->mutex);
+
+ if (!dev->started)
+ goto exit;
+
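+ /* Steps 1-3: the queues must not be used by the backend while the
+ * memory table is replaced, so pause them, resend the table, then
+ * resume.
+ */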
+ /* Step 1: pause the active queues */
+ for (i = 0; i < dev->queue_pairs; i++)
+ dev->ops->enable_qp(dev, i, 0);
+
+ /* Step 2: update memory regions */
+ dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
+
+ /* Step 3: resume the active queues */
+ for (i = 0; i < dev->queue_pairs; i++)
+ dev->ops->enable_qp(dev, i, 1);
+
+exit:
+ pthread_mutex_unlock(&dev->mutex);
+}
+
static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
PMD_DRV_LOG(ERR, "Server mode doesn't support vhost-kernel!");
return -1;
}
- dev->ops = &ops_user;
+ dev->ops = &virtio_ops_user;
} else {
if (is_vhost_user_by_type(dev->path)) {
- dev->ops = &ops_user;
+ dev->ops = &virtio_ops_user;
} else {
- dev->ops = &ops_kernel;
+ dev->ops = &virtio_ops_kernel;
dev->vhostfds = malloc(dev->max_queue_pairs *
sizeof(int));
1ULL << VIRTIO_NET_F_GUEST_CSUM | \
1ULL << VIRTIO_NET_F_GUEST_TSO4 | \
1ULL << VIRTIO_NET_F_GUEST_TSO6 | \
- 1ULL << VIRTIO_F_VERSION_1)
+ 1ULL << VIRTIO_F_IN_ORDER | \
+ 1ULL << VIRTIO_F_VERSION_1 | \
+ 1ULL << VIRTIO_F_RING_PACKED)
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
- int cq, int queue_size, const char *mac, char **ifname)
+ int cq, int queue_size, const char *mac, char **ifname,
+ int mrg_rxbuf, int in_order, int packed_vq)
{
+ pthread_mutex_init(&dev->mutex, NULL);
snprintf(dev->path, PATH_MAX, "%s", path);
+ dev->started = false;
dev->max_queue_pairs = queues;
dev->queue_pairs = 1; /* mq disabled by default */
dev->queue_size = queue_size;
dev->mac_specified = 0;
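+ /* Start from "everything outside VIRTIO_USER_SUPPORTED_FEATURES is
+ * unsupported"; the devargs handling below masks out further bits.
+ */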
+ dev->frontend_features = 0;
+ dev->unsupported_features = ~VIRTIO_USER_SUPPORTED_FEATURES;
parse_mac(dev, mac);
if (*ifname) {
return -1;
}
- if (dev->vhostfd >= 0) {
+ if (!dev->is_server) {
if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER,
NULL) < 0) {
PMD_INIT_LOG(ERR, "set_owner fails: %s",
dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;
}
- if (dev->mac_specified)
+ if (!mrg_rxbuf)
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);
+
+ if (!in_order)
+ dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);
+
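+ /* The packed_vq devarg forces the packed ring layout on or off,
+ * rather than relying on what the backend advertises.
+ */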
+ if (packed_vq)
+ dev->device_features |= (1ull << VIRTIO_F_RING_PACKED);
+ else
+ dev->device_features &= ~(1ull << VIRTIO_F_RING_PACKED);
+
+ if (dev->mac_specified) {
dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
+ } else {
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_MAC);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);
+ }
if (cq) {
/* device does not really need to know anything about CQ,
* so if necessary, we just claim to support CQ
*/
- dev->device_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
+ dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
} else {
- dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
- /* Also disable features depends on VIRTIO_NET_F_CTRL_VQ */
- dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_RX);
- dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN);
- dev->device_features &= ~(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
- dev->device_features &= ~(1ull << VIRTIO_NET_F_MQ);
- dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
+ /* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
+ dev->unsupported_features |=
+ (1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
+ dev->unsupported_features |=
+ (1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
}
/* The backend will not report this feature, we add it explicitly */
if (is_vhost_user_by_type(dev->path))
- dev->device_features |= (1ull << VIRTIO_NET_F_STATUS);
+ dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);
- dev->device_features &= VIRTIO_USER_SUPPORTED_FEATURES;
+ /*
+ * Device features =
+ * (frontend_features | backend_features) & ~unsupported_features;
+ */
+ dev->device_features |= dev->frontend_features;
+ dev->device_features &= ~dev->unsupported_features;
+
+ if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
+ virtio_user_mem_event_cb, dev)) {
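+ /* ENOTSUP is expected when the EAL runs in legacy memory mode,
+ * where memory events cannot happen; it is safe to ignore.
+ */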
+ if (rte_errno != ENOTSUP) {
+ PMD_INIT_LOG(ERR, "Failed to register mem event"
+ " callback");
+ return -1;
+ }
+ }
return 0;
}
virtio_user_stop_device(dev);
+ rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);
+
for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
close(dev->callfds[i]);
close(dev->kickfds[i]);
unlink(dev->path);
}
-static uint8_t
+uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
uint16_t i;
return -1;
}
- for (i = 0; i < q_pairs; ++i)
- ret |= dev->ops->enable_qp(dev, i, 1);
- for (i = q_pairs; i < dev->max_queue_pairs; ++i)
- ret |= dev->ops->enable_qp(dev, i, 0);
-
+ /* Server mode can't enable queue pairs if vhostfd is invalid;
+ * always return 0 in this case.
+ */
+ if (!dev->is_server || dev->vhostfd >= 0) {
+ for (i = 0; i < q_pairs; ++i)
+ ret |= dev->ops->enable_qp(dev, i, 1);
+ for (i = q_pairs; i < dev->max_queue_pairs; ++i)
+ ret |= dev->ops->enable_qp(dev, i, 0);
+ }
dev->queue_pairs = q_pairs;
return ret;