X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio%2Fvirtio_user%2Fvirtio_user_dev.c;h=63424656e3c1a8e55640e97320aa4fe92aed9f8b;hb=844e4683718742b28a7708cc00f7ee7246f92d7b;hp=b004bc13a5b35203ceb57735f141d9e3b0e1057f;hpb=12e9e70c89cd3fc9baa9b82ac21bce5ccb6c4d25;p=dpdk.git

diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index b004bc13a5..63424656e3 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -13,6 +13,7 @@
 #include <sys/types.h>
 #include <sys/stat.h>
 
+#include <rte_string_fns.h>
 #include <rte_eal_memconfig.h>
 
 #include "vhost.h"
@@ -111,21 +112,41 @@ virtio_user_queue_setup(struct virtio_user_dev *dev,
 }
 
 int
-is_vhost_user_by_type(const char *path)
+virtio_user_dev_set_features(struct virtio_user_dev *dev)
 {
-	struct stat sb;
+	uint64_t features;
+	int ret = -1;
 
-	if (stat(path, &sb) == -1)
-		return 0;
+	pthread_mutex_lock(&dev->mutex);
+
+	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER &&
+			dev->vhostfd < 0)
+		goto error;
+
+	/* Step 0: tell vhost to create queues */
+	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
+		goto error;
+
+	features = dev->features;
 
-	return S_ISSOCK(sb.st_mode);
+	/* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
+	features &= ~(1ull << VIRTIO_NET_F_MAC);
+	/* Strip VIRTIO_NET_F_CTRL_VQ, as devices do not really need to know */
+	features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
+	features &= ~(1ull << VIRTIO_NET_F_STATUS);
+	ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
+	if (ret < 0)
+		goto error;
+	PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
+error:
+	pthread_mutex_unlock(&dev->mutex);
+
+	return ret;
 }
 
 int
 virtio_user_start_device(struct virtio_user_dev *dev)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
-	uint64_t features;
 	int ret;
 
 	/*
@@ -141,28 +162,13 @@ virtio_user_start_device(struct virtio_user_dev *dev)
 	 * replaced when we get proper supports from the
 	 * memory subsystem in the future.
 	 */
-	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_lock();
 	pthread_mutex_lock(&dev->mutex);
 
-	if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
+	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER &&
+			dev->vhostfd < 0)
 		goto error;
 
-	/* Step 0: tell vhost to create queues */
-	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
-		goto error;
-
-	/* Step 1: set features */
-	features = dev->features;
-	/* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
-	features &= ~(1ull << VIRTIO_NET_F_MAC);
-	/* Strip VIRTIO_NET_F_CTRL_VQ, as devices do not really need to know */
-	features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
-	features &= ~(1ull << VIRTIO_NET_F_STATUS);
-	ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
-	if (ret < 0)
-		goto error;
-	PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
-
 	/* Step 2: share memory regions */
 	ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
 	if (ret < 0)
@@ -179,12 +185,12 @@ virtio_user_start_device(struct virtio_user_dev *dev)
 	dev->started = true;
 	pthread_mutex_unlock(&dev->mutex);
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 
 	return 0;
 error:
 	pthread_mutex_unlock(&dev->mutex);
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 
 	/* TODO: free resource here or caller to check */
 	return -1;
 }
@@ -224,17 +230,13 @@ out:
 static inline void
 parse_mac(struct virtio_user_dev *dev, const char *mac)
 {
-	int i, r;
-	uint32_t tmp[ETHER_ADDR_LEN];
+	struct rte_ether_addr tmp;
 
 	if (!mac)
 		return;
 
-	r = sscanf(mac, "%x:%x:%x:%x:%x:%x", &tmp[0],
-			&tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5]);
-	if (r == ETHER_ADDR_LEN) {
-		for (i = 0; i < ETHER_ADDR_LEN; ++i)
-			dev->mac_addr[i] = (uint8_t)tmp[i];
+	if (rte_ether_unformat_addr(mac, &tmp) == 0) {
+		memcpy(dev->mac_addr, &tmp, RTE_ETHER_ADDR_LEN);
 		dev->mac_specified = 1;
 	} else {
 		/* ignore the wrong mac, use random mac */
@@ -319,9 +321,9 @@ virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
 
 static void
 virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
-						 const void *addr __rte_unused,
-						 size_t len __rte_unused,
-						 void *arg)
+			 const void *addr,
+			 size_t len __rte_unused,
+			 void *arg)
 {
 	struct virtio_user_dev *dev = arg;
 	struct rte_memseg_list *msl;
@@ -362,16 +364,16 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
 	dev->tapfds = NULL;
 
 	if (dev->is_server) {
-		if (access(dev->path, F_OK) == 0 &&
-		    !is_vhost_user_by_type(dev->path)) {
-			PMD_DRV_LOG(ERR, "Server mode doesn't support vhost-kernel!");
+		if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) {
+			PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!");
 			return -1;
 		}
 		dev->ops = &virtio_ops_user;
 	} else {
-		if (is_vhost_user_by_type(dev->path)) {
+		if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) {
 			dev->ops = &virtio_ops_user;
-		} else {
+		} else if (dev->backend_type ==
+				VIRTIO_USER_BACKEND_VHOST_KERNEL) {
 			dev->ops = &virtio_ops_kernel;
 
 			dev->vhostfds = malloc(dev->max_queue_pairs *
@@ -421,15 +423,24 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
 	 1ULL << VIRTIO_NET_F_GUEST_TSO6 |	\
 	 1ULL << VIRTIO_F_IN_ORDER |		\
 	 1ULL << VIRTIO_F_VERSION_1 |		\
-	 1ULL << VIRTIO_F_RING_PACKED)
+	 1ULL << VIRTIO_F_RING_PACKED |		\
+	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)
+
+#define VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES		\
+	(1ULL << VHOST_USER_PROTOCOL_F_MQ |		\
+	 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |	\
+	 1ULL << VHOST_USER_PROTOCOL_F_STATUS)
 
 int
 virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 		     int cq, int queue_size, const char *mac, char **ifname,
-		     int server, int mrg_rxbuf, int in_order, int packed_vq)
+		     int server, int mrg_rxbuf, int in_order, int packed_vq,
+		     enum virtio_user_backend_type backend_type)
 {
+	uint64_t protocol_features = 0;
+
 	pthread_mutex_init(&dev->mutex, NULL);
-	snprintf(dev->path, PATH_MAX, "%s", path);
+	strlcpy(dev->path, path, PATH_MAX);
 	dev->started = 0;
 	dev->max_queue_pairs = queues;
 	dev->queue_pairs = 1; /* mq disabled by default */
@@ -438,6 +449,9 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 	dev->mac_specified = 0;
 	dev->frontend_features = 0;
 	dev->unsupported_features = ~VIRTIO_USER_SUPPORTED_FEATURES;
+	dev->protocol_features = VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES;
+	dev->backend_type = backend_type;
+
 	parse_mac(dev, mac);
 
 	if (*ifname) {
@@ -450,6 +464,10 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 		return -1;
 	}
 
+	if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER)
+		dev->unsupported_features |=
+			(1ULL << VHOST_USER_F_PROTOCOL_FEATURES);
+
 	if (!dev->is_server) {
 		if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER,
 					   NULL) < 0) {
@@ -464,6 +482,27 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 			     strerror(errno));
 			return -1;
 		}
+
+
+		if (dev->device_features &
+				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) {
+			if (dev->ops->send_request(dev,
+					VHOST_USER_GET_PROTOCOL_FEATURES,
+					&protocol_features))
+				return -1;
+
+			dev->protocol_features &= protocol_features;
+
+			if (dev->ops->send_request(dev,
+					VHOST_USER_SET_PROTOCOL_FEATURES,
+					&dev->protocol_features))
+				return -1;
+
+			if (!(dev->protocol_features &
+					(1ULL << VHOST_USER_PROTOCOL_F_MQ)))
+				dev->unsupported_features |=
+					(1ull << VIRTIO_NET_F_MQ);
+		}
 	} else {
 		/* We just pretend vhost-user can support all these features.
 		 * Note that this could be problematic that if some feature is
@@ -473,6 +512,8 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 		dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;
 	}
 
+
+
 	if (!mrg_rxbuf)
 		dev->unsupported_features |=
 			(1ull << VIRTIO_NET_F_MRG_RXBUF);
@@ -505,7 +546,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 	}
 
 	/* The backend will not report this feature, we add it explicitly */
-	if (is_vhost_user_by_type(dev->path))
+	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
 		dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);
 
 	/*
@@ -541,7 +582,8 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
 		close(dev->kickfds[i]);
 	}
 
-	close(dev->vhostfd);
+	if (dev->vhostfd >= 0)
+		close(dev->vhostfd);
 
 	if (dev->is_server && dev->listenfd >= 0) {
 		close(dev->listenfd);
@@ -549,8 +591,11 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
 	}
 
 	if (dev->vhostfds) {
-		for (i = 0; i < dev->max_queue_pairs; ++i)
+		for (i = 0; i < dev->max_queue_pairs; ++i) {
 			close(dev->vhostfds[i]);
+			if (dev->tapfds[i] >= 0)
+				close(dev->tapfds[i]);
+		}
 		free(dev->vhostfds);
 		free(dev->tapfds);
 	}
@@ -617,6 +662,10 @@ virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
 
 		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
 		status = virtio_user_handle_mq(dev, queues);
+	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
+		   hdr->class == VIRTIO_NET_CTRL_MAC ||
+		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
+		status = 0;
 	}
 
 	/* Update status */
@@ -628,7 +677,7 @@ virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
 static inline int
 desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
 {
-	uint16_t flags = desc->flags;
+	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
 
 	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
 		wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
@@ -668,6 +717,10 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
 		queues = *(uint16_t *)(uintptr_t)
 				vring->desc[idx_data].addr;
 		status = virtio_user_handle_mq(dev, queues);
+	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
+		   hdr->class == VIRTIO_NET_CTRL_MAC ||
+		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
+		status = 0;
 	}
 
 	/* Update status */
@@ -688,6 +741,10 @@ virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
 	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
 	uint16_t n_descs, flags;
 
+	/* Perform a load-acquire barrier in desc_is_avail to
+	 * enforce the ordering between desc flags and desc
+	 * content.
+	 */
 	while (desc_is_avail(&vring->desc[vq->used_idx],
 	       vq->used_wrap_counter)) {
 
@@ -698,8 +755,8 @@ virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
 		if (vq->used_wrap_counter)
 			flags |= VRING_PACKED_DESC_F_AVAIL_USED;
 
-		rte_smp_wmb();
-		vring->desc[vq->used_idx].flags = flags;
+		__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
+				 __ATOMIC_RELEASE);
 
 		vq->used_idx += n_descs;
 		if (vq->used_idx >= dev->queue_size) {
@@ -718,8 +775,10 @@ virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
 	struct vring *vring = &dev->vrings[queue_idx];
 
 	/* Consume avail ring, using used ring idx as first one */
-	while (vring->used->idx != vring->avail->idx) {
-		avail_idx = (vring->used->idx) & (vring->num - 1);
+	while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
+	       != vring->avail->idx) {
+		avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
+			    & (vring->num - 1);
 		desc_idx = vring->avail->ring[avail_idx];
 		n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);
 
@@ -729,6 +788,79 @@ virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
 		uep->id = desc_idx;
 		uep->len = n_descs;
 
-		vring->used->idx++;
+		__atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
+	}
+}
+
+int
+virtio_user_send_status_update(struct virtio_user_dev *dev, uint8_t status)
+{
+	int ret;
+	uint64_t arg = status;
+
+	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
+		ret = dev->ops->send_request(dev,
+				VHOST_USER_SET_STATUS, &arg);
+	else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)
+		ret = dev->ops->send_request(dev,
+				VHOST_USER_SET_STATUS, &status);
+	else
+		return 0;
+
+	if (ret) {
+		PMD_INIT_LOG(ERR, "VHOST_USER_SET_STATUS failed (%d): %s", ret,
+			     strerror(errno));
+		return -1;
 	}
+
+	return 0;
+}
+
+int
+virtio_user_update_status(struct virtio_user_dev *dev)
+{
+	uint64_t ret;
+	uint8_t status;
+	int err;
+
+	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) {
+		err = dev->ops->send_request(dev, VHOST_USER_GET_STATUS, &ret);
+		if (!err && ret > UINT8_MAX) {
+			PMD_INIT_LOG(ERR, "Invalid VHOST_USER_GET_STATUS "
+					"response 0x%" PRIx64 "\n", ret);
+			return -1;
+		}
+
+		status = ret;
+	} else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA) {
+		err = dev->ops->send_request(dev, VHOST_USER_GET_STATUS,
+				&status);
+	} else {
+		return 0;
+	}
+
+	if (err) {
+		PMD_INIT_LOG(ERR, "VHOST_USER_GET_STATUS failed (%d): %s", err,
+				strerror(errno));
+		return -1;
+	}
+
+	dev->status = status;
+	PMD_INIT_LOG(DEBUG, "Updated Device Status(0x%08x):\n"
+			"\t-RESET: %u\n"
+			"\t-ACKNOWLEDGE: %u\n"
+			"\t-DRIVER: %u\n"
+			"\t-DRIVER_OK: %u\n"
+			"\t-FEATURES_OK: %u\n"
+			"\t-DEVICE_NEED_RESET: %u\n"
+			"\t-FAILED: %u\n",
+			dev->status,
+			(dev->status == VIRTIO_CONFIG_STATUS_RESET),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
+	return 0;
 }