}
+/*
+ * Negotiate and apply the virtio feature set on the vhost backend.
+ *
+ * Takes dev->mutex, asks the backend to create the queues, then sends
+ * VHOST_USER_SET_FEATURES with dev->features minus the bits that are
+ * handled purely by the frontend.
+ *
+ * Returns 0 on success, a negative value on failure.
+ */
int
-is_vhost_user_by_type(const char *path)
+virtio_user_dev_set_features(struct virtio_user_dev *dev)
{
- struct stat sb;
+ uint64_t features;
+ int ret = -1;
- if (stat(path, &sb) == -1)
- return 0;
+ pthread_mutex_lock(&dev->mutex);
+
+ /* NOTE(review): vhostfd < 0 presumably means the vhost-user socket
+ * is not connected yet (server mode) — confirm with callers.
+ */
+ if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER &&
+ dev->vhostfd < 0)
+ goto error;
+
+ /* Step 0: tell vhost to create queues */
+ if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
+ goto error;
+
+ features = dev->features;
+
+ /* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
+ features &= ~(1ull << VIRTIO_NET_F_MAC);
+ /* Strip VIRTIO_NET_F_CTRL_VQ, as devices do not really need to know */
+ features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
+ /* Strip VIRTIO_NET_F_STATUS: the frontend adds it back explicitly
+ * during init, so the backend never sees it.
+ */
+ features &= ~(1ull << VIRTIO_NET_F_STATUS);
+ ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
+ if (ret < 0)
+ goto error;
+ PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
+error:
+ /* Success path falls through here as well; only releases the lock */
+ pthread_mutex_unlock(&dev->mutex);
- return S_ISSOCK(sb.st_mode);
+ return ret;
}
int
virtio_user_start_device(struct virtio_user_dev *dev)
{
- uint64_t features;
int ret;
/*
rte_mcfg_mem_read_lock();
pthread_mutex_lock(&dev->mutex);
- if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
- goto error;
-
- /* Step 0: tell vhost to create queues */
- if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
- goto error;
-
- /* Step 1: set features */
- features = dev->features;
- /* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
- features &= ~(1ull << VIRTIO_NET_F_MAC);
- /* Strip VIRTIO_NET_F_CTRL_VQ, as devices do not really need to know */
- features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
- features &= ~(1ull << VIRTIO_NET_F_STATUS);
- ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
- if (ret < 0)
+ if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER &&
+ dev->vhostfd < 0)
goto error;
- PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
/* Step 2: share memory regions */
ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
static inline void
parse_mac(struct virtio_user_dev *dev, const char *mac)
{
- int i, r;
- uint32_t tmp[RTE_ETHER_ADDR_LEN];
+ struct rte_ether_addr tmp;
if (!mac)
return;
- r = sscanf(mac, "%x:%x:%x:%x:%x:%x", &tmp[0],
- &tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5]);
- if (r == RTE_ETHER_ADDR_LEN) {
- for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
- dev->mac_addr[i] = (uint8_t)tmp[i];
+ if (rte_ether_unformat_addr(mac, &tmp) == 0) {
+ memcpy(dev->mac_addr, &tmp, RTE_ETHER_ADDR_LEN);
dev->mac_specified = 1;
} else {
/* ignore the wrong mac, use random mac */
static void
virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
- const void *addr __rte_unused,
- size_t len __rte_unused,
- void *arg)
+ const void *addr,
+ size_t len __rte_unused,
+ void *arg)
{
struct virtio_user_dev *dev = arg;
struct rte_memseg_list *msl;
dev->tapfds = NULL;
if (dev->is_server) {
- if (access(dev->path, F_OK) == 0 &&
- !is_vhost_user_by_type(dev->path)) {
- PMD_DRV_LOG(ERR, "Server mode doesn't support vhost-kernel!");
+ if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) {
+ PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!");
return -1;
}
dev->ops = &virtio_ops_user;
} else {
- if (is_vhost_user_by_type(dev->path)) {
+ if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) {
dev->ops = &virtio_ops_user;
- } else {
+ } else if (dev->backend_type ==
+ VIRTIO_USER_BACKEND_VHOST_KERNEL) {
dev->ops = &virtio_ops_kernel;
dev->vhostfds = malloc(dev->max_queue_pairs *
dev->vhostfds[q] = -1;
dev->tapfds[q] = -1;
}
+ } else if (dev->backend_type ==
+ VIRTIO_USER_BACKEND_VHOST_VDPA) {
+ dev->ops = &virtio_ops_vdpa;
+ } else {
+ PMD_DRV_LOG(ERR, "Unknown backend type");
+ return -1;
}
}
1ULL << VIRTIO_NET_F_GUEST_TSO6 | \
1ULL << VIRTIO_F_IN_ORDER | \
1ULL << VIRTIO_F_VERSION_1 | \
- 1ULL << VIRTIO_F_RING_PACKED)
+ 1ULL << VIRTIO_F_RING_PACKED | \
+ 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)
+
+#define VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES \
+ (1ULL << VHOST_USER_PROTOCOL_F_MQ | \
+ 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \
+ 1ULL << VHOST_USER_PROTOCOL_F_STATUS)
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
int cq, int queue_size, const char *mac, char **ifname,
- int server, int mrg_rxbuf, int in_order, int packed_vq)
+ int server, int mrg_rxbuf, int in_order, int packed_vq,
+ enum virtio_user_backend_type backend_type)
{
+ uint64_t protocol_features = 0;
+
pthread_mutex_init(&dev->mutex, NULL);
strlcpy(dev->path, path, PATH_MAX);
dev->started = 0;
dev->mac_specified = 0;
dev->frontend_features = 0;
dev->unsupported_features = ~VIRTIO_USER_SUPPORTED_FEATURES;
+ dev->protocol_features = VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES;
+ dev->backend_type = backend_type;
+
parse_mac(dev, mac);
if (*ifname) {
return -1;
}
+ if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER)
+ dev->unsupported_features |=
+ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES);
+
if (!dev->is_server) {
if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER,
NULL) < 0) {
strerror(errno));
return -1;
}
+
+ if (dev->device_features &
+ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) {
+ if (dev->ops->send_request(dev,
+ VHOST_USER_GET_PROTOCOL_FEATURES,
+ &protocol_features))
+ return -1;
+
+ dev->protocol_features &= protocol_features;
+
+ if (dev->ops->send_request(dev,
+ VHOST_USER_SET_PROTOCOL_FEATURES,
+ &dev->protocol_features))
+ return -1;
+
+ if (!(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_MQ)))
+ dev->unsupported_features |=
+ (1ull << VIRTIO_NET_F_MQ);
+ }
} else {
/* We just pretend vhost-user can support all these features.
* Note that this could be problematic that if some feature is
dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;
}
+
if (!mrg_rxbuf)
dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);
}
/* The backend will not report this feature, we add it explicitly */
- if (is_vhost_user_by_type(dev->path))
+ if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);
/*
close(dev->kickfds[i]);
}
- close(dev->vhostfd);
+ if (dev->vhostfd >= 0)
+ close(dev->vhostfd);
if (dev->is_server && dev->listenfd >= 0) {
close(dev->listenfd);
}
if (dev->vhostfds) {
- for (i = 0; i < dev->max_queue_pairs; ++i)
+ for (i = 0; i < dev->max_queue_pairs; ++i) {
close(dev->vhostfds[i]);
+ if (dev->tapfds[i] >= 0)
+ close(dev->tapfds[i]);
+ }
free(dev->vhostfds);
free(dev->tapfds);
}
queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
status = virtio_user_handle_mq(dev, queues);
+ } else if (hdr->class == VIRTIO_NET_CTRL_RX ||
+ hdr->class == VIRTIO_NET_CTRL_MAC ||
+ hdr->class == VIRTIO_NET_CTRL_VLAN) {
+ status = 0;
}
/* Update status */
static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
- uint16_t flags = desc->flags;
+ uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
queues = *(uint16_t *)(uintptr_t)
vring->desc[idx_data].addr;
status = virtio_user_handle_mq(dev, queues);
+ } else if (hdr->class == VIRTIO_NET_CTRL_RX ||
+ hdr->class == VIRTIO_NET_CTRL_MAC ||
+ hdr->class == VIRTIO_NET_CTRL_VLAN) {
+ status = 0;
}
/* Update status */
struct vring_packed *vring = &dev->packed_vrings[queue_idx];
uint16_t n_descs, flags;
+ /* Perform a load-acquire barrier in desc_is_avail to
+ * enforce the ordering between desc flags and desc
+ * content.
+ */
while (desc_is_avail(&vring->desc[vq->used_idx],
vq->used_wrap_counter)) {
if (vq->used_wrap_counter)
flags |= VRING_PACKED_DESC_F_AVAIL_USED;
- rte_smp_wmb();
- vring->desc[vq->used_idx].flags = flags;
+ __atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
+ __ATOMIC_RELEASE);
vq->used_idx += n_descs;
if (vq->used_idx >= dev->queue_size) {
struct vring *vring = &dev->vrings[queue_idx];
/* Consume avail ring, using used ring idx as first one */
- while (vring->used->idx != vring->avail->idx) {
- avail_idx = (vring->used->idx) & (vring->num - 1);
+ while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
+ != vring->avail->idx) {
+ avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
+ & (vring->num - 1);
desc_idx = vring->avail->ring[avail_idx];
n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);
uep->id = desc_idx;
uep->len = n_descs;
- vring->used->idx++;
+ __atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
+ }
+}
+
+/*
+ * Propagate the virtio device status byte to the backend via
+ * VHOST_USER_SET_STATUS.
+ *
+ * Returns 0 on success (or if the backend has no status support),
+ * -1 on failure.
+ */
+int
+virtio_user_send_status_update(struct virtio_user_dev *dev, uint8_t status)
+{
+ int ret;
+ uint64_t arg = status;
+
+ /* NOTE(review): vhost-user is handed the status widened to 64 bits
+ * while vDPA gets the raw 8-bit value — presumably matching each
+ * backend's request payload; confirm against the ops
+ * implementations.
+ */
+ if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
+ ret = dev->ops->send_request(dev,
+ VHOST_USER_SET_STATUS, &arg);
+ else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)
+ ret = dev->ops->send_request(dev,
+ VHOST_USER_SET_STATUS, &status);
+ else
+ /* Backend does not support status updates; report success */
+ return 0;
+
+ if (ret) {
+ PMD_INIT_LOG(ERR, "VHOST_USER_SET_STATUS failed (%d): %s", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Query the backend for the current device status via
+ * VHOST_USER_GET_STATUS and cache the result in dev->status.
+ *
+ * Returns 0 on success (or if the backend has no status support),
+ * -1 on failure.
+ */
+int
+virtio_user_update_status(struct virtio_user_dev *dev)
+{
+ uint64_t ret;
+ uint8_t status;
+ int err;
+
+ if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) {
+ err = dev->ops->send_request(dev, VHOST_USER_GET_STATUS, &ret);
+ /* Reply payload is 64 bits but must fit the 8-bit status */
+ if (!err && ret > UINT8_MAX) {
+ PMD_INIT_LOG(ERR, "Invalid VHOST_USER_GET_STATUS "
+ "response 0x%" PRIx64 "\n", ret);
+ return -1;
+ }
+
+ /* NOTE(review): runs even when err != 0, i.e. copies an
+ * indeterminate 'ret'; harmless because the err check below
+ * returns before 'status' is used, but worth tightening.
+ */
+ status = ret;
+ } else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA) {
+ err = dev->ops->send_request(dev, VHOST_USER_GET_STATUS,
+ &status);
+ } else {
+ /* Backend does not support status queries; nothing to do */
+ return 0;
}
+
+ if (err) {
+ PMD_INIT_LOG(ERR, "VHOST_USER_GET_STATUS failed (%d): %s", err,
+ strerror(errno));
+ return -1;
+ }
+
+ /* Cache and pretty-print the new status bits for debugging */
+ dev->status = status;
+ PMD_INIT_LOG(DEBUG, "Updated Device Status(0x%08x):\n"
+ "\t-RESET: %u\n"
+ "\t-ACKNOWLEDGE: %u\n"
+ "\t-DRIVER: %u\n"
+ "\t-DRIVER_OK: %u\n"
+ "\t-FEATURES_OK: %u\n"
+ "\t-DEVICE_NEED_RESET: %u\n"
+ "\t-FAILED: %u\n",
+ dev->status,
+ (dev->status == VIRTIO_CONFIG_STATUS_RESET),
+ !!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
+ !!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
+ !!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
+ !!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
+ !!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
+ !!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
+ return 0;
}