#include <linux/vhost.h>
#include <linux/virtio_net.h>
-#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numa.h>
#include <numaif.h>
#endif
#include <rte_errno.h>
-#include <rte_ethdev.h>
#include <rte_log.h>
-#include <rte_string_fns.h>
#include <rte_memory.h>
#include <rte_malloc.h>
+#include <rte_dmadev.h>
#include <rte_vhost.h>
-#include <rte_rwlock.h>
#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"
-struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
+struct virtio_net *vhost_devices[RTE_MAX_VHOST_DEVICE];
pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER;
/* Called with iotlb_lock read-locked */
vhost_user_iotlb_pending_insert(dev, vq, iova, perm);
if (vhost_user_iotlb_miss(dev, iova, perm)) {
- VHOST_LOG_CONFIG(ERR, "(%s) IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
+ VHOST_LOG_DATA(ERR, "(%s) IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
dev->ifname, iova);
vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
}
return;
rte_free(vq->async->pkts_info);
+ rte_free(vq->async->pkts_cmpl_flag);
rte_free(vq->async->buffers_packed);
vq->async->buffers_packed = NULL;
gpa = hva_to_gpa(dev, hva, exp_size);
if (!gpa) {
- VHOST_LOG_CONFIG(ERR,
+ VHOST_LOG_DATA(ERR,
"(%s) failed to find GPA for log_addr: 0x%"
PRIx64 " hva: 0x%" PRIx64 "\n",
dev->ifname, log_addr, hva);
int i;
pthread_mutex_lock(&vhost_dev_lock);
- for (i = 0; i < MAX_VHOST_DEVICE; i++) {
+ for (i = 0; i < RTE_MAX_VHOST_DEVICE; i++) {
if (vhost_devices[i] == NULL)
break;
}
- if (i == MAX_VHOST_DEVICE) {
+ if (i == RTE_MAX_VHOST_DEVICE) {
VHOST_LOG_CONFIG(ERR, "failed to find a free slot for new device.\n");
pthread_mutex_unlock(&vhost_dev_lock);
return -1;
}
static __rte_always_inline int
-async_channel_register(int vid, uint16_t queue_id,
- struct rte_vhost_async_channel_ops *ops)
+async_channel_register(int vid, uint16_t queue_id)
{
struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
goto out_free_async;
}
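+ /* One completion flag per descriptor slot: the DMA completion path
+ * sets pkts_cmpl_flag[i] once all copies for slot i have finished.
+ */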
+ async->pkts_cmpl_flag = rte_zmalloc_socket(NULL, vq->size * sizeof(bool),
+ RTE_CACHE_LINE_SIZE, node);
+ if (!async->pkts_cmpl_flag) {
+ VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async pkts_cmpl_flag (qid: %d)\n",
+ dev->ifname, queue_id);
+ goto out_free_async;
+ }
+
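+ /* Packed rings additionally keep a shadow array of used elements so
+ * used-ring updates can be deferred until the async copies complete.
+ */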
if (vq_is_packed(dev)) {
async->buffers_packed = rte_malloc_socket(NULL,
vq->size * sizeof(struct vring_used_elem_packed),
}
}
- async->ops.check_completed_copies = ops->check_completed_copies;
- async->ops.transfer_data = ops->transfer_data;
-
vq->async = async;
return 0;
}
int
-rte_vhost_async_channel_register(int vid, uint16_t queue_id,
- struct rte_vhost_async_config config,
- struct rte_vhost_async_channel_ops *ops)
+rte_vhost_async_channel_register(int vid, uint16_t queue_id)
{
struct vhost_virtqueue *vq;
struct virtio_net *dev = get_device(vid);
int ret;
- if (dev == NULL || ops == NULL)
+ if (dev == NULL)
return -1;
if (queue_id >= VHOST_MAX_VRING)
return -1;
vq = dev->virtqueue[queue_id];
if (unlikely(vq == NULL || !dev->async_copy))
return -1;
- if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) async copy is not supported on non-inorder mode (qid: %d)\n",
- dev->ifname, queue_id);
- return -1;
- }
-
- if (unlikely(ops->check_completed_copies == NULL ||
- ops->transfer_data == NULL))
- return -1;
-
rte_spinlock_lock(&vq->access_lock);
- ret = async_channel_register(vid, queue_id, ops);
+ ret = async_channel_register(vid, queue_id);
rte_spinlock_unlock(&vq->access_lock);
return ret;
}
int
-rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
- struct rte_vhost_async_config config,
- struct rte_vhost_async_channel_ops *ops)
+rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id)
{
struct vhost_virtqueue *vq;
struct virtio_net *dev = get_device(vid);
- if (dev == NULL || ops == NULL)
+ if (dev == NULL)
return -1;
if (queue_id >= VHOST_MAX_VRING)
return -1;
vq = dev->virtqueue[queue_id];
if (unlikely(vq == NULL || !dev->async_copy))
return -1;
- if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) async copy is not supported on non-inorder mode (qid: %d)\n",
- dev->ifname, queue_id);
- return -1;
- }
-
- if (unlikely(ops->check_completed_copies == NULL ||
- ops->transfer_data == NULL))
- return -1;
-
- return async_channel_register(vid, queue_id, ops);
+ return async_channel_register(vid, queue_id);
}
int
}
if (vq->async->pkts_inflight_n) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel. "
- "async inflight packets must be completed before unregistration.\n",
+ VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel.\n", dev->ifname);
+ VHOST_LOG_CONFIG(ERR, "(%s) inflight packets must be completed before unregistration.\n",
dev->ifname);
ret = -1;
goto out;
return 0;
if (vq->async->pkts_inflight_n) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel. "
- "async inflight packets must be completed before unregistration.\n",
+ VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel.\n", dev->ifname);
+ VHOST_LOG_CONFIG(ERR, "(%s) inflight packets must be completed before unregistration.\n",
dev->ifname);
return -1;
}
return 0;
}
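+/*
+ * Prepare a DMA vChannel for the vhost async data path: validate the
+ * device and vChannel, allocate the per-device tracking array on first
+ * use, and size the completion-flag table to the vChannel ring.
+ */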
+int
+rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
+{
+ struct rte_dma_info info;
+ void *pkts_cmpl_flag_addr;
+ uint16_t max_desc;
+
+ if (!rte_dma_is_valid(dma_id)) {
+ VHOST_LOG_CONFIG(ERR, "DMA %d is not found.\n", dma_id);
+ return -1;
+ }
+
+ if (rte_dma_info_get(dma_id, &info) != 0) {
+ VHOST_LOG_CONFIG(ERR, "Failed to get DMA %d information.\n", dma_id);
+ return -1;
+ }
+
+ if (vchan_id >= info.max_vchans) {
+ VHOST_LOG_CONFIG(ERR, "Invalid DMA %d vChannel %u.\n", dma_id, vchan_id);
+ return -1;
+ }
+
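+ /* First vChannel configured on this device: allocate the per-vChannel
+ * tracking array on demand.
+ */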
+ if (!dma_copy_track[dma_id].vchans) {
+ struct async_dma_vchan_info *vchans;
+
+ vchans = rte_zmalloc(NULL, sizeof(struct async_dma_vchan_info) * info.max_vchans,
+ RTE_CACHE_LINE_SIZE);
+ if (vchans == NULL) {
+ VHOST_LOG_CONFIG(ERR, "Failed to allocate vchans for DMA %d vChannel %u.\n",
+ dma_id, vchan_id);
+ return -1;
+ }
+
+ dma_copy_track[dma_id].vchans = vchans;
+ }
+
+ if (dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr) {
+ VHOST_LOG_CONFIG(INFO, "DMA %d vChannel %u already registered.\n", dma_id,
+ vchan_id);
+ return 0;
+ }
+
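+ /* Round the ring size up to a power of two so completions can be
+ * indexed with ring_mask instead of a modulo.
+ */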
+ max_desc = info.max_desc;
+ if (!rte_is_power_of_2(max_desc))
+ max_desc = rte_align32pow2(max_desc);
+
+ pkts_cmpl_flag_addr = rte_zmalloc(NULL, sizeof(bool *) * max_desc, RTE_CACHE_LINE_SIZE);
+ if (!pkts_cmpl_flag_addr) {
+ VHOST_LOG_CONFIG(ERR, "Failed to allocate pkts_cmpl_flag_addr for DMA %d "
+ "vChannel %u.\n", dma_id, vchan_id);
+
+ if (dma_copy_track[dma_id].nr_vchans == 0) {
+ rte_free(dma_copy_track[dma_id].vchans);
+ dma_copy_track[dma_id].vchans = NULL;
+ }
+ return -1;
+ }
+
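+ /* Publish the completion-flag table and ring geometry for this vChannel. */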
+ dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr = pkts_cmpl_flag_addr;
+ dma_copy_track[dma_id].vchans[vchan_id].ring_size = max_desc;
+ dma_copy_track[dma_id].vchans[vchan_id].ring_mask = max_desc - 1;
+ dma_copy_track[dma_id].nr_vchans++;
+
+ return 0;
+}
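+
+/*
+ * Usage sketch (hypothetical application code, not part of this file):
+ * a DMA vChannel is configured once up front, then async channels are
+ * registered per virtqueue; dma_id and vid below are placeholders.
+ *
+ *	if (rte_vhost_async_dma_configure(dma_id, 0) < 0)
+ *		rte_exit(EXIT_FAILURE, "failed to configure DMA %d\n", dma_id);
+ *	if (rte_vhost_async_channel_register(vid, VIRTIO_RXQ) < 0)
+ *		rte_exit(EXIT_FAILURE, "failed to register async channel\n");
+ */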
+
int
rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
{