X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fvirtio%2Fvirtio_user%2Fvirtio_user_dev.c;h=1c575d0cdd521119eb99423cb92a6df228f03ba7;hb=2c661d418e4a3fe06c56f024c97c574a609685f7;hp=5560bd9a37ec363419f372b55e3a9181f3fd34ef;hpb=aea29aa5d37b40080cfc1f9a1acba239bf03922f;p=dpdk.git

diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 5560bd9a37..1c575d0cdd 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -13,6 +13,7 @@
 #include 
 #include 
 
+#include 
 #include 
 
 #include "vhost.h"
@@ -43,15 +44,26 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
 	struct vhost_vring_file file;
 	struct vhost_vring_state state;
 	struct vring *vring = &dev->vrings[queue_sel];
+	struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
 	struct vhost_vring_addr addr = {
 		.index = queue_sel,
-		.desc_user_addr = (uint64_t)(uintptr_t)vring->desc,
-		.avail_user_addr = (uint64_t)(uintptr_t)vring->avail,
-		.used_user_addr = (uint64_t)(uintptr_t)vring->used,
 		.log_guest_addr = 0,
 		.flags = 0, /* disable log */
 	};
 
+	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
+		addr.desc_user_addr =
+			(uint64_t)(uintptr_t)pq_vring->desc;
+		addr.avail_user_addr =
+			(uint64_t)(uintptr_t)pq_vring->driver;
+		addr.used_user_addr =
+			(uint64_t)(uintptr_t)pq_vring->device;
+	} else {
+		addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
+		addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
+		addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
+	}
+
 	state.index = queue_sel;
 	state.num = vring->num;
 	dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);
@@ -113,7 +125,6 @@ is_vhost_user_by_type(const char *path)
 int
 virtio_user_start_device(struct virtio_user_dev *dev)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	uint64_t features;
 	int ret;
 
@@ -130,7 +141,7 @@ virtio_user_start_device(struct virtio_user_dev *dev)
 	 * replaced when we get proper supports from the
 	 * memory subsystem in the future.
 	 */
-	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_lock();
 	pthread_mutex_lock(&dev->mutex);
 
 	if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
@@ -168,12 +179,12 @@ virtio_user_start_device(struct virtio_user_dev *dev)
 
 	dev->started = true;
 	pthread_mutex_unlock(&dev->mutex);
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 
 	return 0;
 error:
 	pthread_mutex_unlock(&dev->mutex);
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 	/* TODO: free resource here or caller to check */
 	return -1;
 }
@@ -213,17 +224,13 @@ out:
 static inline void
 parse_mac(struct virtio_user_dev *dev, const char *mac)
 {
-	int i, r;
-	uint32_t tmp[ETHER_ADDR_LEN];
+	struct rte_ether_addr tmp;
 
 	if (!mac)
 		return;
 
-	r = sscanf(mac, "%x:%x:%x:%x:%x:%x", &tmp[0],
-			&tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5]);
-	if (r == ETHER_ADDR_LEN) {
-		for (i = 0; i < ETHER_ADDR_LEN; ++i)
-			dev->mac_addr[i] = (uint8_t)tmp[i];
+	if (rte_ether_unformat_addr(mac, &tmp) == 0) {
+		memcpy(dev->mac_addr, &tmp, RTE_ETHER_ADDR_LEN);
 		dev->mac_specified = 1;
 	} else {
 		/* ignore the wrong mac, use random mac */
@@ -410,20 +417,20 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
 	 1ULL << VIRTIO_NET_F_GUEST_TSO6	|	\
 	 1ULL << VIRTIO_F_IN_ORDER		|	\
 	 1ULL << VIRTIO_F_VERSION_1		|	\
-	 1ULL << VIRTIO_F_RING_PACKED		|	\
-	 1ULL << VIRTIO_RING_F_EVENT_IDX)
+	 1ULL << VIRTIO_F_RING_PACKED)
 
 int
 virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 		     int cq, int queue_size, const char *mac, char **ifname,
-		     int mrg_rxbuf, int in_order, int packed_vq)
+		     int server, int mrg_rxbuf, int in_order, int packed_vq)
 {
 	pthread_mutex_init(&dev->mutex, NULL);
-	snprintf(dev->path, PATH_MAX, "%s", path);
+	strlcpy(dev->path, path, PATH_MAX);
 	dev->started = 0;
 	dev->max_queue_pairs = queues;
 	dev->queue_pairs = 1; /* mq disabled by default */
 	dev->queue_size = queue_size;
+	dev->is_server = server;
 	dev->mac_specified = 0;
 	dev->frontend_features = 0;
 	dev->unsupported_features = ~VIRTIO_USER_SUPPORTED_FEATURES;
@@ -468,23 +475,13 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 	if (!in_order)
 		dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);
 
-	if (packed_vq) {
-		if (cq) {
-			PMD_INIT_LOG(ERR, "control vq not supported yet with "
-					"packed virtqueues\n");
-			return -1;
-		}
-		dev->device_features |= (1ull << VIRTIO_F_RING_PACKED);
-	} else {
-		dev->device_features &= ~(1ull << VIRTIO_F_RING_PACKED);
-	}
+	if (!packed_vq)
+		dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);
 
-	if (dev->mac_specified) {
-		dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
-	} else {
-		dev->device_features &= ~(1ull << VIRTIO_NET_F_MAC);
+	if (dev->mac_specified)
+		dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
+	else
 		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);
-	}
 
 	if (cq) {
 		/* device does not really need to know anything about CQ,
@@ -624,6 +621,94 @@ virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
 	return n_descs;
 }
 
+static inline int
+desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
+{
+	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
+
+	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
+		wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
+}
+
+static uint32_t
+virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
+				   struct vring_packed *vring,
+				   uint16_t idx_hdr)
+{
+	struct virtio_net_ctrl_hdr *hdr;
+	virtio_net_ctrl_ack status = ~0;
+	uint16_t idx_data, idx_status;
+	/* initialize to one, header is first */
+	uint32_t n_descs = 1;
+
+	/* locate desc for header, data, and status */
+	idx_data = idx_hdr + 1;
+	if (idx_data >= dev->queue_size)
+		idx_data -= dev->queue_size;
+
+	n_descs++;
+
+	idx_status = idx_data;
+	while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) {
+		idx_status++;
+		if (idx_status >= dev->queue_size)
+			idx_status -= dev->queue_size;
+		n_descs++;
+	}
+
+	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
+	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
+	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+		uint16_t queues;
+
+		queues = *(uint16_t *)(uintptr_t)
+				vring->desc[idx_data].addr;
+		status = virtio_user_handle_mq(dev, queues);
+	}
+
+	/* Update status */
+	*(virtio_net_ctrl_ack *)(uintptr_t)
+		vring->desc[idx_status].addr = status;
+
+	/* Update used descriptor */
+	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
+	vring->desc[idx_hdr].len = sizeof(status);
+
+	return n_descs;
+}
+
+void
+virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
+{
+	struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
+	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
+	uint16_t n_descs, flags;
+
+	/* Perform a load-acquire barrier in desc_is_avail to
+	 * enforce the ordering between desc flags and desc
+	 * content.
+	 */
+	while (desc_is_avail(&vring->desc[vq->used_idx],
+			     vq->used_wrap_counter)) {
+
+		n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
+				vq->used_idx);
+
+		flags = VRING_DESC_F_WRITE;
+		if (vq->used_wrap_counter)
+			flags |= VRING_PACKED_DESC_F_AVAIL_USED;
+
+		__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
+				 __ATOMIC_RELEASE);
+
+		vq->used_idx += n_descs;
+		if (vq->used_idx >= dev->queue_size) {
+			vq->used_idx -= dev->queue_size;
+			vq->used_wrap_counter ^= 1;
+		}
+	}
+}
+
 void
 virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
 {
@@ -641,7 +726,7 @@ virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
 
 		/* Update used ring */
 		uep = &vring->used->ring[avail_idx];
-		uep->id = avail_idx;
+		uep->id = desc_idx;
 		uep->len = n_descs;
 
 		vring->used->idx++;