It is disabled by default.
+ - ``RTE_VHOST_USER_NET_STATS_ENABLE``
+
+ Per-virtqueue statistics collection is enabled when this flag is set.
+ When enabled, the application may use rte_vhost_vring_stats_get_names() and
+ rte_vhost_vring_stats_get() to collect statistics, and
+ rte_vhost_vring_stats_reset() to reset them.
+
+ It is disabled by default.
+
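+ A minimal registration sketch (the socket path below is only illustrative):
+
+ .. code-block:: c
+
+     if (rte_vhost_driver_register("/tmp/vhost.sock",
+             RTE_VHOST_USER_NET_STATS_ENABLE) < 0)
+         rte_exit(EXIT_FAILURE, "vhost driver registration failed\n");
+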
* ``rte_vhost_driver_set_features(path, features)``
This function sets the feature bits the vhost-user driver supports. The
Clear inflight packets which are submitted to DMA engine in vhost async data
path. Completed packets are returned to applications through ``pkts``.
+* ``rte_vhost_vring_stats_get_names(int vid, uint16_t queue_id, struct rte_vhost_stat_name *names, unsigned int size)``
+
+ This function returns the names of the queue statistics. It requires
+ statistics collection to be enabled at registration time.
+
+* ``rte_vhost_vring_stats_get(int vid, uint16_t queue_id, struct rte_vhost_stat *stats, unsigned int n)``
+
+ This function returns the queue statistics. It requires statistics
+ collection to be enabled at registration time.
+
+* ``rte_vhost_vring_stats_reset(int vid, uint16_t queue_id)``
+
+ This function resets the queue statistics. It requires statistics
+ collection to be enabled at registration time.
+
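+ A usage sketch, assuming ``vid`` and ``queue_id`` identify a queue of a
+ device registered with ``RTE_VHOST_USER_NET_STATS_ENABLE`` (error handling
+ trimmed for brevity):
+
+ .. code-block:: c
+
+     struct rte_vhost_stat_name *names;
+     struct rte_vhost_stat *stats;
+     int count, i;
+
+     /* Probe with NULL to learn how many counters the queue exposes. */
+     count = rte_vhost_vring_stats_get_names(vid, queue_id, NULL, 0);
+     if (count <= 0)
+         return;
+
+     names = malloc(count * sizeof(*names));
+     stats = malloc(count * sizeof(*stats));
+
+     rte_vhost_vring_stats_get_names(vid, queue_id, names, count);
+     rte_vhost_vring_stats_get(vid, queue_id, stats, count);
+
+     /* Each statistic's id indexes the corresponding entry in names. */
+     for (i = 0; i < count; i++)
+         printf("%s: %" PRIu64 "\n", names[stats[i].id].name, stats[i].value);
+
+     rte_vhost_vring_stats_reset(vid, queue_id);
+
+     free(names);
+     free(stats);
+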
Vhost-user Implementations
--------------------------
#define RTE_VHOST_USER_LINEARBUF_SUPPORT (1ULL << 6)
#define RTE_VHOST_USER_ASYNC_COPY (1ULL << 7)
#define RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS (1ULL << 8)
+#define RTE_VHOST_USER_NET_STATS_ENABLE (1ULL << 9)
/* Features. */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
uint8_t match;
};
+/** Maximum name length for the statistics counters */
+#define RTE_VHOST_STATS_NAME_SIZE 64
+
+/**
+ * Vhost virtqueue statistics structure
+ *
+ * This structure is used by rte_vhost_vring_stats_get() to provide
+ * virtqueue statistics to the calling application.
+ * It maps a name ID, corresponding to an index in the array returned
+ * by rte_vhost_vring_stats_get_names(), to a statistic value.
+ */
+struct rte_vhost_stat {
+ uint64_t id; /**< The index in xstats name array. */
+ uint64_t value; /**< The statistic counter value. */
+};
+
+/**
+ * Vhost virtqueue statistic name element
+ *
+ * This structure is used by rte_vhost_vring_stats_get_names() to
+ * provide virtqueue statistics names to the calling application.
+ */
+struct rte_vhost_stat_name {
+ char name[RTE_VHOST_STATS_NAME_SIZE]; /**< The statistic name. */
+};
+
/**
* Convert guest physical address to host virtual address
*
int
rte_vhost_slave_config_change(int vid, bool need_reply);
+/**
+ * Retrieve names of statistics of a Vhost virtqueue.
+ *
+ * Entries in the *name* array are matched by index with the *stats* array
+ * returned by rte_vhost_vring_stats_get(): name[i].name describes
+ * stats[i].value.
+ *
+ * @param vid
+ * vhost device ID
+ * @param queue_id
+ * vhost queue index
+ * @param name
+ * array of at least *size* elements to be filled.
+ * If set to NULL, the function returns the required number of elements.
+ * @param size
+ * The number of elements in the *name* array.
+ * @return
+ * - Success if greater than 0 and less than or equal to *size*. The return
+ * value indicates the number of elements filled in the *name* array.
+ * - Failure if greater than *size*. The return value indicates the number of
+ * elements in the *name* array required to succeed.
+ * - Failure if lower than 0. The device ID or queue ID is invalid, or
+ * statistics collection is not enabled.
+ */
+__rte_experimental
+int
+rte_vhost_vring_stats_get_names(int vid, uint16_t queue_id,
+ struct rte_vhost_stat_name *name, unsigned int size);
+
+/**
+ * Retrieve statistics of a Vhost virtqueue.
+ *
+ * Entries in the *stats* array are matched by index with the *name* array
+ * returned by rte_vhost_vring_stats_get_names(): name[i].name describes
+ * stats[i].value.
+ *
+ * @param vid
+ * vhost device ID
+ * @param queue_id
+ * vhost queue index
+ * @param stats
+ * A pointer to an array of rte_vhost_stat structures to be filled with the
+ * virtqueue statistics IDs and values.
+ * @param n
+ * The number of elements in the *stats* array.
+ * @return
+ * - Success if greater than 0 and less than or equal to *n*. The return value
+ * indicates the number of elements filled in the *stats* array.
+ * - Failure if greater than *n*. The return value indicates the number of
+ * elements in the *stats* array required to succeed.
+ * - Failure if lower than 0. The device ID or queue ID is invalid, or
+ * statistics collection is not enabled.
+ */
+__rte_experimental
+int
+rte_vhost_vring_stats_get(int vid, uint16_t queue_id,
+ struct rte_vhost_stat *stats, unsigned int n);
+
+/**
+ * Reset statistics of a Vhost virtqueue.
+ *
+ * @param vid
+ * vhost device ID
+ * @param queue_id
+ * vhost queue index
+ * @return
+ * - Success if 0. Statistics have been reset.
+ * - Failure if lower than 0. The device ID or queue ID is invalid, or
+ * statistics collection is not enabled.
+ */
+__rte_experimental
+int
+rte_vhost_vring_stats_reset(int vid, uint16_t queue_id);
+
#ifdef __cplusplus
}
#endif
bool linearbuf;
bool async_copy;
bool net_compliant_ol_flags;
+ bool stats_enabled;
/*
* The "supported_features" indicates the feature bits the
vhost_set_ifname(vid, vsocket->path, size);
vhost_setup_virtio_net(vid, vsocket->use_builtin_virtio_net,
- vsocket->net_compliant_ol_flags);
+ vsocket->net_compliant_ol_flags, vsocket->stats_enabled);
vhost_attach_vdpa_device(vid, vsocket->vdpa_dev);
vsocket->linearbuf = flags & RTE_VHOST_USER_LINEARBUF_SUPPORT;
vsocket->async_copy = flags & RTE_VHOST_USER_ASYNC_COPY;
vsocket->net_compliant_ol_flags = flags & RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS;
+ vsocket->stats_enabled = flags & RTE_VHOST_USER_NET_STATS_ENABLE;
if (vsocket->async_copy &&
(flags & (RTE_VHOST_USER_IOMMU_SUPPORT |
# added in 22.07
rte_vhost_async_get_inflight_thread_unsafe;
-
+ rte_vhost_vring_stats_get_names;
+ rte_vhost_vring_stats_get;
+ rte_vhost_vring_stats_reset;
};
INTERNAL {
struct virtio_net *vhost_devices[RTE_MAX_VHOST_DEVICE];
pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER;
+struct vhost_vq_stats_name_off {
+ char name[RTE_VHOST_STATS_NAME_SIZE];
+ unsigned int offset;
+};
+
+static const struct vhost_vq_stats_name_off vhost_vq_stat_strings[] = {
+ {"good_packets", offsetof(struct vhost_virtqueue, stats.packets)},
+ {"good_bytes", offsetof(struct vhost_virtqueue, stats.bytes)},
+ {"multicast_packets", offsetof(struct vhost_virtqueue, stats.multicast)},
+ {"broadcast_packets", offsetof(struct vhost_virtqueue, stats.broadcast)},
+ {"undersize_packets", offsetof(struct vhost_virtqueue, stats.size_bins[0])},
+ {"size_64_packets", offsetof(struct vhost_virtqueue, stats.size_bins[1])},
+ {"size_65_127_packets", offsetof(struct vhost_virtqueue, stats.size_bins[2])},
+ {"size_128_255_packets", offsetof(struct vhost_virtqueue, stats.size_bins[3])},
+ {"size_256_511_packets", offsetof(struct vhost_virtqueue, stats.size_bins[4])},
+ {"size_512_1023_packets", offsetof(struct vhost_virtqueue, stats.size_bins[5])},
+ {"size_1024_1518_packets", offsetof(struct vhost_virtqueue, stats.size_bins[6])},
+ {"size_1519_max_packets", offsetof(struct vhost_virtqueue, stats.size_bins[7])},
+};
+
+#define VHOST_NB_VQ_STATS RTE_DIM(vhost_vq_stat_strings)
+
/* Called with iotlb_lock read-locked */
uint64_t
__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
void
-vhost_setup_virtio_net(int vid, bool enable, bool compliant_ol_flags)
+vhost_setup_virtio_net(int vid, bool enable, bool compliant_ol_flags, bool stats_enabled)
{
struct virtio_net *dev = get_device(vid);
dev->flags |= VIRTIO_DEV_LEGACY_OL_FLAGS;
else
dev->flags &= ~VIRTIO_DEV_LEGACY_OL_FLAGS;
+ if (stats_enabled)
+ dev->flags |= VIRTIO_DEV_STATS_ENABLED;
+ else
+ dev->flags &= ~VIRTIO_DEV_STATS_ENABLED;
}
void
return 0;
}
+
+int
+rte_vhost_vring_stats_get_names(int vid, uint16_t queue_id,
+ struct rte_vhost_stat_name *name, unsigned int size)
+{
+ struct virtio_net *dev = get_device(vid);
+ unsigned int i;
+
+ if (dev == NULL)
+ return -1;
+
+ if (queue_id >= dev->nr_vring)
+ return -1;
+
+ if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
+ return -1;
+
+ if (name == NULL || size < VHOST_NB_VQ_STATS)
+ return VHOST_NB_VQ_STATS;
+
+ for (i = 0; i < VHOST_NB_VQ_STATS; i++)
+ snprintf(name[i].name, sizeof(name[i].name), "%s_q%u_%s",
+ (queue_id & 1) ? "rx" : "tx",
+ queue_id / 2, vhost_vq_stat_strings[i].name);
+
+ return VHOST_NB_VQ_STATS;
+}
+
+int
+rte_vhost_vring_stats_get(int vid, uint16_t queue_id,
+ struct rte_vhost_stat *stats, unsigned int n)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ unsigned int i;
+
+ if (dev == NULL)
+ return -1;
+
+ if (queue_id >= dev->nr_vring)
+ return -1;
+
+ if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
+ return -1;
+
+ if (stats == NULL || n < VHOST_NB_VQ_STATS)
+ return VHOST_NB_VQ_STATS;
+
+ vq = dev->virtqueue[queue_id];
+
+ rte_spinlock_lock(&vq->access_lock);
+ for (i = 0; i < VHOST_NB_VQ_STATS; i++) {
+ stats[i].value =
+ *(uint64_t *)(((char *)vq) + vhost_vq_stat_strings[i].offset);
+ stats[i].id = i;
+ }
+ rte_spinlock_unlock(&vq->access_lock);
+
+ return VHOST_NB_VQ_STATS;
+}
+
+int
+rte_vhost_vring_stats_reset(int vid, uint16_t queue_id)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+
+ if (dev == NULL)
+ return -1;
+
+ if (queue_id >= dev->nr_vring)
+ return -1;
+
+ if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+
+ rte_spinlock_lock(&vq->access_lock);
+ memset(&vq->stats, 0, sizeof(vq->stats));
+ rte_spinlock_unlock(&vq->access_lock);
+
+ return 0;
+}
+
RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);
RTE_LOG_REGISTER_SUFFIX(vhost_data_log_level, data, WARNING);
#define VIRTIO_DEV_FEATURES_FAILED ((uint32_t)1 << 4)
/* Used to indicate that the virtio_net tx code should fill TX ol_flags */
#define VIRTIO_DEV_LEGACY_OL_FLAGS ((uint32_t)1 << 5)
+/* Used to indicate the application has requested statistics collection */
+#define VIRTIO_DEV_STATS_ENABLED ((uint32_t)1 << 6)
/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1
uint32_t count;
};
+/**
+ * Virtqueue statistics
+ */
+struct virtqueue_stats {
+ uint64_t packets;
+ uint64_t bytes;
+ uint64_t multicast;
+ uint64_t broadcast;
+ /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
+ uint64_t size_bins[8];
+};
+
/**
* iovec
*/
#define VIRTIO_UNINITIALIZED_NOTIF (-1)
struct vhost_vring_addr ring_addrs;
+ struct virtqueue_stats stats;
} __rte_cache_aligned;
/* Virtio device status as per Virtio specification */
void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev);
void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
-void vhost_setup_virtio_net(int vid, bool enable, bool legacy_ol_flags);
+void vhost_setup_virtio_net(int vid, bool enable, bool legacy_ol_flags, bool stats_enabled);
void vhost_enable_extbuf(int vid);
void vhost_enable_linearbuf(int vid);
int vhost_enable_guest_notification(struct virtio_net *dev,
return true;
}
-
#endif /* _VHOST_NET_CDEV_H_ */
return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}
+/*
+ * This function must be called with the virtqueue's access_lock taken.
+ */
+static inline void
+vhost_queue_stats_update(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtqueue_stats *stats = &vq->stats;
+ int i;
+
+ if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
+ return;
+
+ for (i = 0; i < count; i++) {
+ struct rte_ether_addr *ea;
+ struct rte_mbuf *pkt = pkts[i];
+ uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt);
+
+ stats->packets++;
+ stats->bytes += pkt_len;
+
+ if (pkt_len == 64) {
+ stats->size_bins[1]++;
+ } else if (pkt_len > 64 && pkt_len < 1024) {
+ uint32_t bin;
+
+ /* bin index = floor(log2(pkt_len)) - 4, derived from the leading-zero
+ * count: 65-127 -> bins[2], 128-255 -> bins[3], ..., 512-1023 -> bins[5] */
+ bin = (sizeof(pkt_len) * 8) - __builtin_clz(pkt_len) - 5;
+ stats->size_bins[bin]++;
+ } else {
+ if (pkt_len < 64)
+ stats->size_bins[0]++;
+ else if (pkt_len < 1519)
+ stats->size_bins[6]++;
+ else
+ stats->size_bins[7]++;
+ }
+
+ ea = rte_pktmbuf_mtod(pkt, struct rte_ether_addr *);
+ if (rte_is_multicast_ether_addr(ea)) {
+ if (rte_is_broadcast_ether_addr(ea))
+ stats->broadcast++;
+ else
+ stats->multicast++;
+ }
+ }
+}
+
static __rte_always_inline int64_t
vhost_async_dma_transfer_one(struct virtio_net *dev, struct vhost_virtqueue *vq,
int16_t dma_id, uint16_t vchan_id, uint16_t flag_idx,
else
nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
+ vhost_queue_stats_update(dev, vq, pkts, nb_tx);
+
out:
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_unlock(vq);
n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
+ vhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);
+
out:
rte_spinlock_unlock(&vq->access_lock);
* learning table will get updated first.
*/
pkts[0] = rarp_mbuf;
+ vhost_queue_stats_update(dev, vq, pkts, 1);
pkts++;
count -= 1;
}
count = virtio_dev_tx_split_compliant(dev, vq, mbuf_pool, pkts, count);
}
+ vhost_queue_stats_update(dev, vq, pkts, count);
+
out:
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_unlock(vq);