replace zero-length arrays with flexible ones
[dpdk.git] / lib / vhost / vhost.c
index 3b05f17..b14521e 100644
@@ -4,7 +4,6 @@
 
 #include <linux/vhost.h>
 #include <linux/virtio_net.h>
-#include <stddef.h>
 #include <stdint.h>
 #include <stdlib.h>
 #ifdef RTE_LIBRTE_VHOST_NUMA
@@ -13,21 +12,45 @@
 #endif
 
 #include <rte_errno.h>
-#include <rte_ethdev.h>
 #include <rte_log.h>
-#include <rte_string_fns.h>
 #include <rte_memory.h>
 #include <rte_malloc.h>
 #include <rte_vhost.h>
-#include <rte_rwlock.h>
 
 #include "iotlb.h"
 #include "vhost.h"
 #include "vhost_user.h"
 
-struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
+struct virtio_net *vhost_devices[RTE_MAX_VHOST_DEVICE];
 pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER;
 
+struct vhost_vq_stats_name_off {
+       char name[RTE_VHOST_STATS_NAME_SIZE];
+       unsigned int offset;
+};
+
+static const struct vhost_vq_stats_name_off vhost_vq_stat_strings[] = {
+       {"good_packets",           offsetof(struct vhost_virtqueue, stats.packets)},
+       {"good_bytes",             offsetof(struct vhost_virtqueue, stats.bytes)},
+       {"multicast_packets",      offsetof(struct vhost_virtqueue, stats.multicast)},
+       {"broadcast_packets",      offsetof(struct vhost_virtqueue, stats.broadcast)},
+       {"undersize_packets",      offsetof(struct vhost_virtqueue, stats.size_bins[0])},
+       {"size_64_packets",        offsetof(struct vhost_virtqueue, stats.size_bins[1])},
+       {"size_65_127_packets",    offsetof(struct vhost_virtqueue, stats.size_bins[2])},
+       {"size_128_255_packets",   offsetof(struct vhost_virtqueue, stats.size_bins[3])},
+       {"size_256_511_packets",   offsetof(struct vhost_virtqueue, stats.size_bins[4])},
+       {"size_512_1023_packets",  offsetof(struct vhost_virtqueue, stats.size_bins[5])},
+       {"size_1024_1518_packets", offsetof(struct vhost_virtqueue, stats.size_bins[6])},
+       {"size_1519_max_packets",  offsetof(struct vhost_virtqueue, stats.size_bins[7])},
+       {"guest_notifications",    offsetof(struct vhost_virtqueue, stats.guest_notifications)},
+       {"iotlb_hits",             offsetof(struct vhost_virtqueue, stats.iotlb_hits)},
+       {"iotlb_misses",           offsetof(struct vhost_virtqueue, stats.iotlb_misses)},
+       {"inflight_submitted",     offsetof(struct vhost_virtqueue, stats.inflight_submitted)},
+       {"inflight_completed",     offsetof(struct vhost_virtqueue, stats.inflight_completed)},
+};
+
+#define VHOST_NB_VQ_STATS RTE_DIM(vhost_vq_stat_strings)
+
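
The name/offset table above is what drives the statistics getters added at the end of this diff: each exported counter is located by its byte offset inside struct vhost_virtqueue instead of a per-stat switch. A minimal sketch of how one entry resolves to a live counter value, assuming vq points at a valid virtqueue (illustration only, not part of the patch):

	/* Illustration: same pointer arithmetic as rte_vhost_vring_stats_get(). */
	static inline uint64_t
	vq_stat_value(const struct vhost_virtqueue *vq, unsigned int i)
	{
		return *(const uint64_t *)((const char *)vq +
				vhost_vq_stat_strings[i].offset);
	}
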
 /* Called with iotlb_lock read-locked */
 uint64_t
 __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
@@ -41,8 +64,14 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
        tmp_size = *size;
 
        vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
-       if (tmp_size == *size)
+       if (tmp_size == *size) {
+               if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
+                       vq->stats.iotlb_hits++;
                return vva;
+       }
+
+       if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
+               vq->stats.iotlb_misses++;
 
        iova += tmp_size;
 
@@ -58,7 +87,7 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
                vhost_user_iotlb_pending_insert(dev, vq, iova, perm);
                if (vhost_user_iotlb_miss(dev, iova, perm)) {
-                       VHOST_LOG_CONFIG(ERR, "(%s) IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
+                       VHOST_LOG_DATA(ERR, "(%s) IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
                                dev->ifname, iova);
                        vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
                }
@@ -343,6 +372,7 @@ vhost_free_async_mem(struct vhost_virtqueue *vq)
                return;
 
        rte_free(vq->async->pkts_info);
+       rte_free(vq->async->pkts_cmpl_flag);
 
        rte_free(vq->async->buffers_packed);
        vq->async->buffers_packed = NULL;
@@ -420,7 +450,7 @@ translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
                gpa = hva_to_gpa(dev, hva, exp_size);
                if (!gpa) {
-                       VHOST_LOG_CONFIG(ERR,
+                       VHOST_LOG_DATA(ERR,
                                "(%s) failed to find GPA for log_addr: 0x%"
                                PRIx64 " hva: 0x%" PRIx64 "\n",
                                dev->ifname, log_addr, hva);
@@ -665,12 +695,12 @@ vhost_new_device(void)
        int i;
 
        pthread_mutex_lock(&vhost_dev_lock);
-       for (i = 0; i < MAX_VHOST_DEVICE; i++) {
+       for (i = 0; i < RTE_MAX_VHOST_DEVICE; i++) {
                if (vhost_devices[i] == NULL)
                        break;
        }
 
-       if (i == MAX_VHOST_DEVICE) {
+       if (i == RTE_MAX_VHOST_DEVICE) {
                VHOST_LOG_CONFIG(ERR, "failed to find a free slot for new device.\n");
                pthread_mutex_unlock(&vhost_dev_lock);
                return -1;
@@ -758,7 +788,7 @@ vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
 }
 
 void
-vhost_setup_virtio_net(int vid, bool enable, bool compliant_ol_flags)
+vhost_setup_virtio_net(int vid, bool enable, bool compliant_ol_flags, bool stats_enabled)
 {
        struct virtio_net *dev = get_device(vid);
 
@@ -773,6 +803,10 @@ vhost_setup_virtio_net(int vid, bool enable, bool compliant_ol_flags)
                dev->flags |= VIRTIO_DEV_LEGACY_OL_FLAGS;
        else
                dev->flags &= ~VIRTIO_DEV_LEGACY_OL_FLAGS;
+       if (stats_enabled)
+               dev->flags |= VIRTIO_DEV_STATS_ENABLED;
+       else
+               dev->flags &= ~VIRTIO_DEV_STATS_ENABLED;
 }
 
 void
@@ -1294,11 +1328,15 @@ rte_vhost_vring_call(int vid, uint16_t vring_idx)
        if (!vq)
                return -1;
 
+       rte_spinlock_lock(&vq->access_lock);
+
        if (vq_is_packed(dev))
                vhost_vring_call_packed(dev, vq);
        else
                vhost_vring_call_split(dev, vq);
 
+       rte_spinlock_unlock(&vq->access_lock);
+
        return 0;
 }
 
@@ -1621,8 +1659,7 @@ rte_vhost_extern_callback_register(int vid,
 }
 
 static __rte_always_inline int
-async_channel_register(int vid, uint16_t queue_id,
-               struct rte_vhost_async_channel_ops *ops)
+async_channel_register(int vid, uint16_t queue_id)
 {
        struct virtio_net *dev = get_device(vid);
        struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
@@ -1651,6 +1688,14 @@ async_channel_register(int vid, uint16_t queue_id,
                goto out_free_async;
        }
 
+       async->pkts_cmpl_flag = rte_zmalloc_socket(NULL, vq->size * sizeof(bool),
+                       RTE_CACHE_LINE_SIZE, node);
+       if (!async->pkts_cmpl_flag) {
+               VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async pkts_cmpl_flag (qid: %d)\n",
+                               dev->ifname, queue_id);
+               goto out_free_async;
+       }
+
        if (vq_is_packed(dev)) {
                async->buffers_packed = rte_malloc_socket(NULL,
                                vq->size * sizeof(struct vring_used_elem_packed),
@@ -1671,9 +1716,6 @@ async_channel_register(int vid, uint16_t queue_id,
                }
        }
 
-       async->ops.check_completed_copies = ops->check_completed_copies;
-       async->ops.transfer_data = ops->transfer_data;
-
        vq->async = async;
 
        return 0;
@@ -1686,15 +1728,13 @@ out_free_async:
 }
 
 int
-rte_vhost_async_channel_register(int vid, uint16_t queue_id,
-               struct rte_vhost_async_config config,
-               struct rte_vhost_async_channel_ops *ops)
+rte_vhost_async_channel_register(int vid, uint16_t queue_id)
 {
        struct vhost_virtqueue *vq;
        struct virtio_net *dev = get_device(vid);
        int ret;
 
-       if (dev == NULL || ops == NULL)
+       if (dev == NULL)
                return -1;
 
        if (queue_id >= VHOST_MAX_VRING)
@@ -1705,33 +1745,20 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,
        if (unlikely(vq == NULL || !dev->async_copy))
                return -1;
 
-       if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
-               VHOST_LOG_CONFIG(ERR,
-                       "(%s) async copy is not supported on non-inorder mode (qid: %d)\n",
-                       dev->ifname, queue_id);
-               return -1;
-       }
-
-       if (unlikely(ops->check_completed_copies == NULL ||
-               ops->transfer_data == NULL))
-               return -1;
-
        rte_spinlock_lock(&vq->access_lock);
-       ret = async_channel_register(vid, queue_id, ops);
+       ret = async_channel_register(vid, queue_id);
        rte_spinlock_unlock(&vq->access_lock);
 
        return ret;
 }
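
With the ops callbacks gone, registering an async channel takes only the device and queue; the copy engine itself is now bound separately through rte_vhost_async_dma_configure() below. A hypothetical usage sketch from an application's new_device() callback (the callback name and error handling are assumptions, not part of this patch):

	/* Hypothetical sketch: enable the async data path on every vring of a
	 * newly probed device. */
	static int
	app_new_device(int vid)
	{
		uint16_t i, nr_vrings = rte_vhost_get_vring_num(vid);

		for (i = 0; i < nr_vrings; i++) {
			if (rte_vhost_async_channel_register(vid, i) != 0)
				return -1;
		}
		return 0;
	}
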
 
 int
-rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
-               struct rte_vhost_async_config config,
-               struct rte_vhost_async_channel_ops *ops)
+rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id)
 {
        struct vhost_virtqueue *vq;
        struct virtio_net *dev = get_device(vid);
 
-       if (dev == NULL || ops == NULL)
+       if (dev == NULL)
                return -1;
 
        if (queue_id >= VHOST_MAX_VRING)
@@ -1742,18 +1769,13 @@ rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
        if (unlikely(vq == NULL || !dev->async_copy))
                return -1;
 
-       if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
-               VHOST_LOG_CONFIG(ERR,
-                       "(%s) async copy is not supported on non-inorder mode (qid: %d)\n",
-                       dev->ifname, queue_id);
+       if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) {
+               VHOST_LOG_CONFIG(ERR, "(%s) %s() called without access lock taken.\n",
+                               dev->ifname, __func__);
                return -1;
        }
 
-       if (unlikely(ops->check_completed_copies == NULL ||
-               ops->transfer_data == NULL))
-               return -1;
-
-       return async_channel_register(vid, queue_id, ops);
+       return async_channel_register(vid, queue_id);
 }
 
 int
@@ -1774,27 +1796,23 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
        if (vq == NULL)
                return ret;
 
-       ret = 0;
-
-       if (!vq->async)
-               return ret;
-
        if (!rte_spinlock_trylock(&vq->access_lock)) {
                VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel, virtqueue busy.\n",
                                dev->ifname);
-               return -1;
+               return ret;
        }
 
-       if (vq->async->pkts_inflight_n) {
-               VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel. "
-                       "async inflight packets must be completed before unregistration.\n",
+       if (!vq->async) {
+               ret = 0;
+       } else if (vq->async->pkts_inflight_n) {
+               VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel.\n", dev->ifname);
+               VHOST_LOG_CONFIG(ERR, "(%s) inflight packets must be completed before unregistration.\n",
                        dev->ifname);
-               ret = -1;
-               goto out;
+       } else {
+               vhost_free_async_mem(vq);
+               ret = 0;
        }
 
-       vhost_free_async_mem(vq);
-out:
        rte_spinlock_unlock(&vq->access_lock);
 
        return ret;
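
Unregistration still refuses to tear down a channel with packets in flight, so callers are expected to drain completions first. A hedged sketch of that drain loop (pkts, MAX_PKT_BURST, dma_id and vchan_id are placeholders; rte_vhost_poll_enqueue_completed() takes the DMA device/vchannel pair directly in this API revision):

	/* Sketch: complete outstanding async copies, then unregister. */
	while (rte_vhost_async_get_inflight(vid, queue_id) > 0) {
		uint16_t n = rte_vhost_poll_enqueue_completed(vid, queue_id,
				pkts, MAX_PKT_BURST, dma_id, vchan_id);
		rte_pktmbuf_free_bulk(pkts, n);
	}
	rte_vhost_async_channel_unregister(vid, queue_id);
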
@@ -1817,12 +1835,18 @@ rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
        if (vq == NULL)
                return -1;
 
+       if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) {
+               VHOST_LOG_CONFIG(ERR, "(%s) %s() called without access lock taken.\n",
+                               dev->ifname, __func__);
+               return -1;
+       }
+
        if (!vq->async)
                return 0;
 
        if (vq->async->pkts_inflight_n) {
-               VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel. "
-                       "async inflight packets must be completed before unregistration.\n",
+               VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel.\n", dev->ifname);
+               VHOST_LOG_CONFIG(ERR, "(%s) inflight packets must be completed before unregistration.\n",
                        dev->ifname);
                return -1;
        }
@@ -1832,6 +1856,68 @@ rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
        return 0;
 }
 
+int
+rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
+{
+       struct rte_dma_info info;
+       void *pkts_cmpl_flag_addr;
+       uint16_t max_desc;
+
+       if (!rte_dma_is_valid(dma_id)) {
+               VHOST_LOG_CONFIG(ERR, "DMA %d is not found.\n", dma_id);
+               return -1;
+       }
+
+       rte_dma_info_get(dma_id, &info);
+       if (vchan_id >= info.max_vchans) {
+               VHOST_LOG_CONFIG(ERR, "Invalid DMA %d vChannel %u.\n", dma_id, vchan_id);
+               return -1;
+       }
+
+       if (!dma_copy_track[dma_id].vchans) {
+               struct async_dma_vchan_info *vchans;
+
+               vchans = rte_zmalloc(NULL, sizeof(struct async_dma_vchan_info) * info.max_vchans,
+                               RTE_CACHE_LINE_SIZE);
+               if (vchans == NULL) {
+                       VHOST_LOG_CONFIG(ERR, "Failed to allocate vchans for DMA %d vChannel %u.\n",
+                                       dma_id, vchan_id);
+                       return -1;
+               }
+
+               dma_copy_track[dma_id].vchans = vchans;
+       }
+
+       if (dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr) {
+               VHOST_LOG_CONFIG(INFO, "DMA %d vChannel %u already registered.\n", dma_id,
+                               vchan_id);
+               return 0;
+       }
+
+       max_desc = info.max_desc;
+       if (!rte_is_power_of_2(max_desc))
+               max_desc = rte_align32pow2(max_desc);
+
+       pkts_cmpl_flag_addr = rte_zmalloc(NULL, sizeof(bool *) * max_desc, RTE_CACHE_LINE_SIZE);
+       if (!pkts_cmpl_flag_addr) {
+               VHOST_LOG_CONFIG(ERR, "Failed to allocate pkts_cmpl_flag_addr for DMA %d "
+                               "vChannel %u.\n", dma_id, vchan_id);
+
+               if (dma_copy_track[dma_id].nr_vchans == 0) {
+                       rte_free(dma_copy_track[dma_id].vchans);
+                       dma_copy_track[dma_id].vchans = NULL;
+               }
+               return -1;
+       }
+
+       dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr = pkts_cmpl_flag_addr;
+       dma_copy_track[dma_id].vchans[vchan_id].ring_size = max_desc;
+       dma_copy_track[dma_id].vchans[vchan_id].ring_mask = max_desc - 1;
+       dma_copy_track[dma_id].nr_vchans++;
+
+       return 0;
+}
+
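
rte_vhost_async_dma_configure() only attaches an already working dmadev vchannel to the vhost async path; configuring and starting the DMA device remains the application's job. A hypothetical setup sketch (device ID, ring size and vchannel count are illustrative):

	/* Hypothetical sketch: prepare dmadev 0 / vchannel 0, then hand it
	 * to vhost. */
	struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
	struct rte_dma_vchan_conf qconf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = 1024,
	};
	int16_t dma_id = 0;

	if (rte_dma_configure(dma_id, &dev_conf) != 0 ||
			rte_dma_vchan_setup(dma_id, 0, &qconf) != 0 ||
			rte_dma_start(dma_id) != 0)
		rte_exit(EXIT_FAILURE, "dmadev %d setup failed\n", dma_id);

	if (rte_vhost_async_dma_configure(dma_id, 0) < 0)
		rte_exit(EXIT_FAILURE, "vhost DMA configure failed\n");
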
 int
 rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
 {
@@ -1850,9 +1936,6 @@ rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
        if (vq == NULL)
                return ret;
 
-       if (!vq->async)
-               return ret;
-
        if (!rte_spinlock_trylock(&vq->access_lock)) {
                VHOST_LOG_CONFIG(DEBUG,
                        "(%s) failed to check in-flight packets. virtqueue busy.\n",
@@ -1860,12 +1943,46 @@ rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
                return ret;
        }
 
-       ret = vq->async->pkts_inflight_n;
+       if (vq->async)
+               ret = vq->async->pkts_inflight_n;
+
        rte_spinlock_unlock(&vq->access_lock);
 
        return ret;
 }
 
+int
+rte_vhost_async_get_inflight_thread_unsafe(int vid, uint16_t queue_id)
+{
+       struct vhost_virtqueue *vq;
+       struct virtio_net *dev = get_device(vid);
+       int ret = -1;
+
+       if (dev == NULL)
+               return ret;
+
+       if (queue_id >= VHOST_MAX_VRING)
+               return ret;
+
+       vq = dev->virtqueue[queue_id];
+
+       if (vq == NULL)
+               return ret;
+
+       if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) {
+               VHOST_LOG_CONFIG(ERR, "(%s) %s() called without access lock taken.\n",
+                               dev->ifname, __func__);
+               return -1;
+       }
+
+       if (!vq->async)
+               return ret;
+
+       ret = vq->async->pkts_inflight_n;
+
+       return ret;
+}
+
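
The *_thread_unsafe variants now assert that the caller already holds the virtqueue access lock rather than taking it themselves. They are meant for contexts where the vhost library holds the lock on the caller's behalf; by my reading (an assumption worth checking against the release notes) the vring_state_changed() driver callback is such a context:

	/* Hedged sketch: toggle the async channel from vring_state_changed(),
	 * where the access lock is assumed to be held by the library. */
	static int
	app_vring_state_changed(int vid, uint16_t queue_id, int enable)
	{
		if (enable)
			return rte_vhost_async_channel_register_thread_unsafe(vid, queue_id);

		return rte_vhost_async_channel_unregister_thread_unsafe(vid, queue_id);
	}
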
 int
 rte_vhost_get_monitor_addr(int vid, uint16_t queue_id,
                struct rte_vhost_power_monitor_cond *pmc)
@@ -1904,5 +2021,89 @@ rte_vhost_get_monitor_addr(int vid, uint16_t queue_id,
        return 0;
 }
 
+
+int
+rte_vhost_vring_stats_get_names(int vid, uint16_t queue_id,
+               struct rte_vhost_stat_name *name, unsigned int size)
+{
+       struct virtio_net *dev = get_device(vid);
+       unsigned int i;
+
+       if (dev == NULL)
+               return -1;
+
+       if (queue_id >= dev->nr_vring)
+               return -1;
+
+       if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
+               return -1;
+
+       if (name == NULL || size < VHOST_NB_VQ_STATS)
+               return VHOST_NB_VQ_STATS;
+
+       for (i = 0; i < VHOST_NB_VQ_STATS; i++)
+               snprintf(name[i].name, sizeof(name[i].name), "%s_q%u_%s",
+                               (queue_id & 1) ? "rx" : "tx",
+                               queue_id / 2, vhost_vq_stat_strings[i].name);
+
+       return VHOST_NB_VQ_STATS;
+}
+
+int
+rte_vhost_vring_stats_get(int vid, uint16_t queue_id,
+               struct rte_vhost_stat *stats, unsigned int n)
+{
+       struct virtio_net *dev = get_device(vid);
+       struct vhost_virtqueue *vq;
+       unsigned int i;
+
+       if (dev == NULL)
+               return -1;
+
+       if (queue_id >= dev->nr_vring)
+               return -1;
+
+       if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
+               return -1;
+
+       if (stats == NULL || n < VHOST_NB_VQ_STATS)
+               return VHOST_NB_VQ_STATS;
+
+       vq = dev->virtqueue[queue_id];
+
+       rte_spinlock_lock(&vq->access_lock);
+       for (i = 0; i < VHOST_NB_VQ_STATS; i++) {
+               stats[i].value =
+                       *(uint64_t *)(((char *)vq) + vhost_vq_stat_strings[i].offset);
+               stats[i].id = i;
+       }
+       rte_spinlock_unlock(&vq->access_lock);
+
+       return VHOST_NB_VQ_STATS;
+}
+
+int
+rte_vhost_vring_stats_reset(int vid, uint16_t queue_id)
+{
+       struct virtio_net *dev = get_device(vid);
+       struct vhost_virtqueue *vq;
+
+       if (dev == NULL)
+               return -1;
+
+       if (queue_id >= dev->nr_vring)
+               return -1;
+
+       if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
+               return -1;
+
+       vq = dev->virtqueue[queue_id];
+
+       rte_spinlock_lock(&vq->access_lock);
+       memset(&vq->stats, 0, sizeof(vq->stats));
+       rte_spinlock_unlock(&vq->access_lock);
+
+       return 0;
+}
+
 RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);
 RTE_LOG_REGISTER_SUFFIX(vhost_data_log_level, data, WARNING);
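
Taken together, the three stats functions follow the usual DPDK xstats pattern: a NULL or undersized buffer returns the required count, so callers size their arrays once, then poll. A usage sketch, assuming statistics were enabled when the vhost driver was registered (the RTE_VHOST_USER_NET_STATS_ENABLE flag name is my assumption for the matching registration flag):

	/* Usage sketch: dump and reset all counters of one virtqueue. */
	static void
	dump_vq_stats(int vid, uint16_t queue_id)
	{
		struct rte_vhost_stat_name *names;
		struct rte_vhost_stat *stats;
		int i, n;

		n = rte_vhost_vring_stats_get_names(vid, queue_id, NULL, 0);
		if (n <= 0)
			return;

		names = calloc(n, sizeof(*names));
		stats = calloc(n, sizeof(*stats));
		if (names == NULL || stats == NULL)
			goto out;

		rte_vhost_vring_stats_get_names(vid, queue_id, names, n);
		n = rte_vhost_vring_stats_get(vid, queue_id, stats, n);
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n", names[stats[i].id].name,
					stats[i].value);

		rte_vhost_vring_stats_reset(vid, queue_id);
	out:
		free(names);
		free(stats);
	}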