+
+uint32_t
+rte_vhost_rx_queue_count(int vid, uint16_t qid)
+{
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+ uint32_t ret = 0;
+
+ dev = get_device(vid);
+ if (dev == NULL)
+ return 0;
+
+ /* Host RX traffic is carried on the guest TX rings, which occupy
+ * the odd virtqueue indexes; even indexes are invalid here.
+ */
+ if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, qid);
+ return 0;
+ }
+
+ vq = dev->virtqueue[qid];
+ if (vq == NULL)
+ return 0;
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ if (unlikely(vq->enabled == 0 || vq->avail == NULL))
+ goto out;
+
+ /* Snapshot the guest-written avail index; the (mod 64K) gap to
+ * last_avail_idx is the number of descriptors not yet dequeued.
+ */
+ ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
+
+out:
+ rte_spinlock_unlock(&vq->access_lock);
+ return ret;
+}
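+
+/*
+ * Usage sketch (application side; "vid", "qid" and "mbuf_pool" are
+ * illustrative assumptions): poll the avail count first to skip the
+ * cost of an empty dequeue call. "qid" must be an odd (guest TX)
+ * virtqueue index.
+ *
+ *	struct rte_mbuf *pkts[32];
+ *	uint16_t nb;
+ *
+ *	if (rte_vhost_rx_queue_count(vid, qid) == 0)
+ *		return;
+ *	nb = rte_vhost_dequeue_burst(vid, qid, mbuf_pool, pkts, 32);
+ */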
+
+struct rte_vdpa_device *
+rte_vhost_get_vdpa_device(int vid)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return NULL;
+
+ return dev->vdpa_dev;
+}
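+
+/*
+ * Usage sketch (application side; all names are illustrative): the
+ * return value tells whether the port is driven by a vDPA device or
+ * by the software datapath.
+ *
+ *	struct rte_vdpa_device *vdpa = rte_vhost_get_vdpa_device(vid);
+ *	bool hw_offload = (vdpa != NULL);
+ */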
+
+int
+rte_vhost_get_log_base(int vid, uint64_t *log_base,
+ uint64_t *log_size)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL || log_base == NULL || log_size == NULL)
+ return -1;
+
+ *log_base = dev->log_base;
+ *log_size = dev->log_size;
+
+ return 0;
+}
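+
+/*
+ * Usage sketch (application side; resync_pages() is a hypothetical
+ * helper): during live migration the log is a bitmap with one bit per
+ * 4 KiB guest page (VHOST_LOG_PAGE in this library), so a migration
+ * thread can scan it for dirty pages. The atomic load matters because
+ * the datapath sets bits concurrently.
+ *
+ *	uint64_t base, size, i;
+ *	const uint8_t *log;
+ *
+ *	if (rte_vhost_get_log_base(vid, &base, &size) < 0)
+ *		return;
+ *	log = (const uint8_t *)(uintptr_t)base;
+ *	for (i = 0; i < size; i++) {
+ *		uint8_t bits = __atomic_load_n(&log[i], __ATOMIC_RELAXED);
+ *		if (bits != 0)
+ *			resync_pages(i * 8, bits);
+ *	}
+ */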
+
+int
+rte_vhost_get_vring_base(int vid, uint16_t queue_id,
+ uint16_t *last_avail_idx, uint16_t *last_used_idx)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
+ return -1;
+
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+ if (!vq)
+ return -1;
+
+ if (vq_is_packed(dev)) {
+ /* Bits [0:14] carry the index, bit [15] the wrap counter. */
+ *last_avail_idx = (vq->avail_wrap_counter << 15) |
+ vq->last_avail_idx;
+ *last_used_idx = (vq->used_wrap_counter << 15) |
+ vq->last_used_idx;
+ } else {
+ *last_avail_idx = vq->last_avail_idx;
+ *last_used_idx = vq->last_used_idx;
+ }
+
+ return 0;
+}
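+
+/*
+ * Note for callers (illustration only): on packed rings the two
+ * out-parameters are not plain indexes; bit 15 carries the wrap
+ * counter and must be split off before the values are used as ring
+ * offsets:
+ *
+ *	bool avail_wrap = !!(last_avail_idx & (1 << 15));
+ *	uint16_t avail = last_avail_idx & 0x7fff;
+ */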
+
+int
+rte_vhost_set_vring_base(int vid, uint16_t queue_id,
+ uint16_t last_avail_idx, uint16_t last_used_idx)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return -1;
+
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+ if (!vq)
+ return -1;
+
+ if (vq_is_packed(dev)) {
+ /* Bits [0:14] carry the index, bit [15] the wrap counter. */
+ vq->last_avail_idx = last_avail_idx & 0x7fff;
+ vq->avail_wrap_counter = !!(last_avail_idx & (1 << 15));
+ vq->last_used_idx = last_used_idx & 0x7fff;
+ vq->used_wrap_counter = !!(last_used_idx & (1 << 15));
+ } else {
+ vq->last_avail_idx = last_avail_idx;
+ vq->last_used_idx = last_used_idx;
+ }
+
+ return 0;
+}
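+
+/*
+ * Usage sketch (application side; "old_vid"/"new_vid" and the
+ * save_state()/load_state() helpers are illustrative assumptions):
+ * the get/set pair lets a backend checkpoint ring state across a
+ * reconnect.
+ *
+ *	uint16_t avail, used;
+ *
+ *	if (rte_vhost_get_vring_base(old_vid, queue_id, &avail, &used) == 0)
+ *		save_state(queue_id, avail, used);
+ *
+ *	load_state(queue_id, &avail, &used);
+ *	rte_vhost_set_vring_base(new_vid, queue_id, avail, used);
+ */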
+
+int
+rte_vhost_get_vring_base_from_inflight(int vid,
+ uint16_t queue_id,
+ uint16_t *last_avail_idx,
+ uint16_t *last_used_idx)
+{
+ struct rte_vhost_inflight_info_packed *inflight_info;
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
+ return -1;
+
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+ if (!vq)
+ return -1;
+
+ if (!vq_is_packed(dev))
+ return -1;
+
+ inflight_info = vq->inflight_packed;
+ if (!inflight_info)
+ return -1;
+
+ /* Resume from the last used index recorded in the inflight
+ * area; avail restarts equal to used so that descriptors which
+ * were in flight at crash time get reprocessed.
+ */
+ *last_avail_idx = (inflight_info->old_used_wrap_counter << 15) |
+ inflight_info->old_used_idx;
+ *last_used_idx = *last_avail_idx;
+
+ return 0;
+}
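+
+/*
+ * Usage sketch (application side, illustration only): after a backend
+ * crash, a packed ring should be resumed from the inflight-tracked
+ * base; for split rings this call fails and the plain variant applies.
+ *
+ *	uint16_t avail, used;
+ *
+ *	if (rte_vhost_get_vring_base_from_inflight(vid, queue_id,
+ *			&avail, &used) < 0)
+ *		rte_vhost_get_vring_base(vid, queue_id, &avail, &used);
+ *	rte_vhost_set_vring_base(vid, queue_id, avail, used);
+ */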
+
+int
+rte_vhost_extern_callback_register(int vid,
+ struct rte_vhost_user_extern_ops const * const ops, void *ctx)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL || ops == NULL)
+ return -1;
+
+ dev->extern_ops = *ops;
+ dev->extern_data = ctx;
+ return 0;
+}
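+
+/*
+ * Usage sketch (application side; the handler name and "my_ctx" are
+ * illustrative): handlers follow the rte_vhost_msg_handle prototype
+ * from rte_vhost.h, and returning RTE_VHOST_MSG_RESULT_NOT_HANDLED
+ * keeps the built-in message handling in place.
+ *
+ *	static enum rte_vhost_msg_result
+ *	snoop_msg(int vid, void *msg)
+ *	{
+ *		return RTE_VHOST_MSG_RESULT_NOT_HANDLED;
+ *	}
+ *
+ *	static const struct rte_vhost_user_extern_ops snoop_ops = {
+ *		.pre_msg_handle = snoop_msg,
+ *	};
+ *
+ *	rte_vhost_extern_callback_register(vid, &snoop_ops, my_ctx);
+ */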
+
+int
+rte_vhost_async_channel_register(int vid, uint16_t queue_id,
+ uint32_t features,
+ struct rte_vhost_async_channel_ops *ops)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+ struct rte_vhost_async_features f;
+ int node;
+ int ret = 0;
+
+ if (dev == NULL || ops == NULL)
+ return -1;
+
+ f.intval = features;
+
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(vq == NULL || !dev->async_copy))
+ return -1;
+
+ /* packed queues and out-of-order completion are not supported */
+ if (unlikely(vq_is_packed(dev) || !f.async_inorder)) {
+ VHOST_LOG_CONFIG(ERR,
+ "async copy is not supported on packed queues or in non-inorder mode "
+ "(vid %d, qid: %d)\n", vid, queue_id);
+ return -1;
+ }
+
+ if (unlikely(ops->check_completed_copies == NULL ||
+ ops->transfer_data == NULL))
+ return -1;
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ if (unlikely(vq->async_registered)) {
+ VHOST_LOG_CONFIG(ERR,
+ "async register failed: channel already registered "
+ "(vid %d, qid: %d)\n", vid, queue_id);
+ ret = -1;
+ goto reg_out;
+ }
+
+#ifdef RTE_LIBRTE_VHOST_NUMA
+ if (get_mempolicy(&node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
+ VHOST_LOG_CONFIG(ERR,
+ "unable to get numa information in async register. "
+ "allocating async buffer memory on the caller thread node\n");
+ node = SOCKET_ID_ANY;
+ }
+#else
+ node = SOCKET_ID_ANY;
+#endif
+
+ vq->async_pkts_info = rte_malloc_socket(NULL,
+ vq->size * sizeof(struct async_inflight_info),
+ RTE_CACHE_LINE_SIZE, node);
+ vq->it_pool = rte_malloc_socket(NULL,
+ VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),
+ RTE_CACHE_LINE_SIZE, node);
+ vq->vec_pool = rte_malloc_socket(NULL,
+ VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
+ RTE_CACHE_LINE_SIZE, node);
+ vq->async_descs_split = rte_malloc_socket(NULL,
+ vq->size * sizeof(struct vring_used_elem),
+ RTE_CACHE_LINE_SIZE, node);
+ if (!vq->async_descs_split || !vq->async_pkts_info ||
+ !vq->it_pool || !vq->vec_pool) {
+ vhost_free_async_mem(vq);
+ VHOST_LOG_CONFIG(ERR,
+ "async register failed: cannot allocate memory for vq data "
+ "(vid %d, qid: %d)\n", vid, queue_id);
+ ret = -1;
+ goto reg_out;
+ }
+
+ vq->async_ops.check_completed_copies = ops->check_completed_copies;
+ vq->async_ops.transfer_data = ops->transfer_data;
+
+ vq->async_inorder = f.async_inorder;
+ vq->async_threshold = f.async_threshold;
+
+ vq->async_registered = true;
+
+reg_out:
+ rte_spinlock_unlock(&vq->access_lock);
+
+ return ret;
+}
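+
+/*
+ * Usage sketch (application side; "dma_transfer" and "dma_check" are
+ * hypothetical callbacks matching struct rte_vhost_async_channel_ops):
+ * register a DMA-backed channel on the guest RX ring. async_threshold
+ * selects the packet length below which copies stay on the CPU.
+ *
+ *	struct rte_vhost_async_channel_ops chan_ops = {
+ *		.transfer_data = dma_transfer,
+ *		.check_completed_copies = dma_check,
+ *	};
+ *	struct rte_vhost_async_features f = { .intval = 0 };
+ *
+ *	f.async_inorder = 1;
+ *	f.async_threshold = 256;
+ *
+ *	if (rte_vhost_async_channel_register(vid, VIRTIO_RXQ,
+ *			f.intval, &chan_ops) < 0)
+ *		rte_exit(EXIT_FAILURE, "async register failed\n");
+ */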
+
+int
+rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+ int ret = -1;
+
+ if (dev == NULL)
+ return ret;
+
+ if (queue_id >= VHOST_MAX_VRING)
+ return ret;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (vq == NULL)
+ return ret;
+
+ ret = 0;
+
+ if (!vq->async_registered)
+ return ret;
+
+ if (!rte_spinlock_trylock(&vq->access_lock)) {
+ VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
+ "virt queue busy.\n");
+ return -1;
+ }
+
+ if (vq->async_pkts_inflight_n) {
+ VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
+ "async inflight packets must be completed before unregistration.\n");
+ ret = -1;
+ goto out;
+ }
+
+ vhost_free_async_mem(vq);
+
+ vq->async_ops.transfer_data = NULL;
+ vq->async_ops.check_completed_copies = NULL;
+ vq->async_registered = false;
+
+out:
+ rte_spinlock_unlock(&vq->access_lock);
+
+ return ret;
+}
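+
+/*
+ * Usage sketch (application side, illustration only): drain completed
+ * copies before unregistering, since unregistration is refused while
+ * copies are in flight. Because of the trylock above, a busy datapath
+ * can also make the call fail transiently, so callers should be ready
+ * to retry.
+ *
+ *	struct rte_mbuf *done[32];
+ *	uint16_t n;
+ *
+ *	do {
+ *		n = rte_vhost_poll_enqueue_completed(vid, VIRTIO_RXQ,
+ *				done, 32);
+ *		rte_pktmbuf_free_bulk(done, n);
+ *	} while (n > 0);
+ *
+ *	rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
+ */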
+
+RTE_LOG_REGISTER(vhost_config_log_level, lib.vhost.config, INFO);
+RTE_LOG_REGISTER(vhost_data_log_level, lib.vhost.data, WARNING);