vhost: rework async datapath and improve log messages
[dpdk.git] / lib / vhost / vhost.c
index 996287c..bc88148 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -4,28 +4,24 @@
 
 #include <linux/vhost.h>
 #include <linux/virtio_net.h>
-#include <stddef.h>
 #include <stdint.h>
 #include <stdlib.h>
 #ifdef RTE_LIBRTE_VHOST_NUMA
 #include <numa.h>
 #include <numaif.h>
 #endif
 
 #include <rte_errno.h>
-#include <rte_ethdev.h>
 #include <rte_log.h>
-#include <rte_string_fns.h>
 #include <rte_memory.h>
 #include <rte_malloc.h>
 #include <rte_vhost.h>
-#include <rte_rwlock.h>
 
 #include "iotlb.h"
 #include "vhost.h"
 #include "vhost_user.h"
 
-struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
+struct virtio_net *vhost_devices[RTE_MAX_VHOST_DEVICE];
 pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER;
 
 /* Called with iotlb_lock read-locked */
@@ -56,11 +52,10 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                 */
                vhost_user_iotlb_rd_unlock(vq);
 
-               vhost_user_iotlb_pending_insert(vq, iova, perm);
+               vhost_user_iotlb_pending_insert(dev, vq, iova, perm);
                if (vhost_user_iotlb_miss(dev, iova, perm)) {
-                       VHOST_LOG_CONFIG(ERR,
-                               "IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
-                               iova);
+                       VHOST_LOG_DATA(ERR, "(%s) IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
+                               dev->ifname, iova);
                        vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
                }
 
@@ -126,8 +121,8 @@ __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
        hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
        if (map_len != len) {
                VHOST_LOG_DATA(ERR,
-                       "Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
-                       iova);
+                       "(%s) failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+                       dev->ifname, iova);
                return;
        }
 
@@ -243,8 +238,8 @@ __vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
        hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
        if (map_len != len) {
                VHOST_LOG_DATA(ERR,
-                       "Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
-                       iova);
+                       "(%s) failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+                       dev->ifname, iova);
                return;
        }
 
@@ -340,19 +335,19 @@ cleanup_device(struct virtio_net *dev, int destroy)
 static void
 vhost_free_async_mem(struct vhost_virtqueue *vq)
 {
-       rte_free(vq->async_pkts_info);
+       if (!vq->async)
+               return;
 
-       rte_free(vq->async_buffers_packed);
-       vq->async_buffers_packed = NULL;
-       rte_free(vq->async_descs_split);
-       vq->async_descs_split = NULL;
+       rte_free(vq->async->pkts_info);
+       rte_free(vq->async->pkts_cmpl_flag);
 
-       rte_free(vq->it_pool);
-       rte_free(vq->vec_pool);
+       rte_free(vq->async->buffers_packed);
+       vq->async->buffers_packed = NULL;
+       rte_free(vq->async->descs_split);
+       vq->async->descs_split = NULL;
 
-       vq->async_pkts_info = NULL;
-       vq->it_pool = NULL;
-       vq->vec_pool = NULL;
+       rte_free(vq->async);
+       vq->async = NULL;
 }
 
 void
@@ -422,10 +417,10 @@ translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
                gpa = hva_to_gpa(dev, hva, exp_size);
                if (!gpa) {
-                       VHOST_LOG_CONFIG(ERR,
-                               "VQ: Failed to find GPA for log_addr: 0x%"
+                       VHOST_LOG_DATA(ERR,
+                               "(%s) failed to find GPA for log_addr: 0x%"
                                PRIx64 " hva: 0x%" PRIx64 "\n",
-                               log_addr, hva);
+                               dev->ifname, log_addr, hva);
                        return 0;
                }
                return gpa;
@@ -552,16 +547,15 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
        int numa_node = SOCKET_ID_ANY;
 
        if (vring_idx >= VHOST_MAX_VRING) {
-               VHOST_LOG_CONFIG(ERR,
-                               "Failed not init vring, out of bound (%d)\n",
-                               vring_idx);
+               VHOST_LOG_CONFIG(ERR, "(%s) failed to init vring, out of bounds (%d)\n",
+                               dev->ifname, vring_idx);
                return;
        }
 
        vq = dev->virtqueue[vring_idx];
        if (!vq) {
-               VHOST_LOG_CONFIG(ERR, "Virtqueue not allocated (%d)\n",
-                               vring_idx);
+               VHOST_LOG_CONFIG(ERR, "(%s) virtqueue not allocated (%d)\n",
+                               dev->ifname, vring_idx);
                return;
        }
 
@@ -573,8 +567,8 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 
 #ifdef RTE_LIBRTE_VHOST_NUMA
        if (get_mempolicy(&numa_node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
-               VHOST_LOG_CONFIG(ERR, "(%d) failed to query numa node: %s\n",
-                       dev->vid, rte_strerror(errno));
+               VHOST_LOG_CONFIG(ERR, "(%s) failed to query numa node: %s\n",
+                       dev->ifname, rte_strerror(errno));
                numa_node = SOCKET_ID_ANY;
        }
 #endif
@@ -591,15 +585,15 @@ reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 
        if (vring_idx >= VHOST_MAX_VRING) {
                VHOST_LOG_CONFIG(ERR,
-                               "Failed not init vring, out of bound (%d)\n",
-                               vring_idx);
+                               "(%s) failed to reset vring, out of bound (%d)\n",
+                               dev->ifname, vring_idx);
                return;
        }
 
        vq = dev->virtqueue[vring_idx];
        if (!vq) {
-               VHOST_LOG_CONFIG(ERR, "Virtqueue not allocated (%d)\n",
-                               vring_idx);
+               VHOST_LOG_CONFIG(ERR, "(%s) failed to reset vring, virtqueue not allocated (%d)\n",
+                               dev->ifname, vring_idx);
                return;
        }
 
@@ -621,8 +615,8 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 
                vq = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), 0);
                if (vq == NULL) {
-                       VHOST_LOG_CONFIG(ERR,
-                               "Failed to allocate memory for vring:%u.\n", i);
+                       VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for vring %u.\n",
+                                       dev->ifname, i);
                        return -1;
                }
 
@@ -668,22 +662,20 @@ vhost_new_device(void)
        int i;
 
        pthread_mutex_lock(&vhost_dev_lock);
-       for (i = 0; i < MAX_VHOST_DEVICE; i++) {
+       for (i = 0; i < RTE_MAX_VHOST_DEVICE; i++) {
                if (vhost_devices[i] == NULL)
                        break;
        }
 
-       if (i == MAX_VHOST_DEVICE) {
-               VHOST_LOG_CONFIG(ERR,
-                       "Failed to find a free slot for new device.\n");
+       if (i == RTE_MAX_VHOST_DEVICE) {
+               VHOST_LOG_CONFIG(ERR, "failed to find a free slot for new device.\n");
                pthread_mutex_unlock(&vhost_dev_lock);
                return -1;
        }
 
        dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
        if (dev == NULL) {
-               VHOST_LOG_CONFIG(ERR,
-                       "Failed to allocate memory for new dev.\n");
+               VHOST_LOG_CONFIG(ERR, "failed to allocate memory for new device.\n");
                pthread_mutex_unlock(&vhost_dev_lock);
                return -1;
        }
@@ -835,9 +827,8 @@ rte_vhost_get_numa_node(int vid)
        ret = get_mempolicy(&numa_node, NULL, 0, dev,
                            MPOL_F_NODE | MPOL_F_ADDR);
        if (ret < 0) {
-               VHOST_LOG_CONFIG(ERR,
-                       "(%d) failed to query numa node: %s\n",
-                       vid, rte_strerror(errno));
+               VHOST_LOG_CONFIG(ERR, "(%s) failed to query numa node: %s\n",
+                       dev->ifname, rte_strerror(errno));
                return -1;
        }
 
@@ -1226,6 +1217,9 @@ rte_vhost_set_last_inflight_io_split(int vid, uint16_t vring_idx,
        if (unlikely(!vq->inflight_split))
                return -1;
 
+       if (unlikely(idx >= vq->size))
+               return -1;
+
        vq->inflight_split->last_inflight_io = idx;
        return 0;
 }
@@ -1468,8 +1462,8 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
                return 0;
 
        if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
-               VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
-                       dev->vid, __func__, qid);
+               VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+                       dev->ifname, __func__, qid);
                return 0;
        }
 
@@ -1500,7 +1494,8 @@ rte_vhost_get_vdpa_device(int vid)
        return dev->vdpa_dev;
 }
 
-int rte_vhost_get_log_base(int vid, uint64_t *log_base,
+int
+rte_vhost_get_log_base(int vid, uint64_t *log_base,
                uint64_t *log_size)
 {
        struct virtio_net *dev = get_device(vid);
@@ -1514,7 +1509,8 @@ int rte_vhost_get_log_base(int vid, uint64_t *log_base,
        return 0;
 }
 
-int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
+int
+rte_vhost_get_vring_base(int vid, uint16_t queue_id,
                uint16_t *last_avail_idx, uint16_t *last_used_idx)
 {
        struct vhost_virtqueue *vq;
@@ -1543,7 +1539,8 @@ int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
        return 0;
 }
 
-int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
+int
+rte_vhost_set_vring_base(int vid, uint16_t queue_id,
                uint16_t last_avail_idx, uint16_t last_used_idx)
 {
        struct vhost_virtqueue *vq;
@@ -1606,7 +1603,8 @@ rte_vhost_get_vring_base_from_inflight(int vid,
        return 0;
 }
 
-int rte_vhost_extern_callback_register(int vid,
+int
+rte_vhost_extern_callback_register(int vid,
                struct rte_vhost_user_extern_ops const * const ops, void *ctx)
 {
        struct virtio_net *dev = get_device(vid);
@@ -1620,94 +1618,82 @@ int rte_vhost_extern_callback_register(int vid,
 }
 
 static __rte_always_inline int
-async_channel_register(int vid, uint16_t queue_id,
-               struct rte_vhost_async_channel_ops *ops)
+async_channel_register(int vid, uint16_t queue_id)
 {
        struct virtio_net *dev = get_device(vid);
        struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
+       struct vhost_async *async;
+       int node = vq->numa_node;
 
-       if (unlikely(vq->async_registered)) {
+       if (unlikely(vq->async)) {
                VHOST_LOG_CONFIG(ERR,
-                       "async register failed: channel already registered "
-                       "(vid %d, qid: %d)\n", vid, queue_id);
+                               "(%s) async register failed: already registered (qid: %d)\n",
+                               dev->ifname, queue_id);
                return -1;
        }
 
-       vq->async_pkts_info = rte_malloc_socket(NULL,
-                       vq->size * sizeof(struct async_inflight_info),
-                       RTE_CACHE_LINE_SIZE, vq->numa_node);
-       if (!vq->async_pkts_info) {
-               vhost_free_async_mem(vq);
-               VHOST_LOG_CONFIG(ERR,
-                       "async register failed: cannot allocate memory for async_pkts_info "
-                       "(vid %d, qid: %d)\n", vid, queue_id);
+       async = rte_zmalloc_socket(NULL, sizeof(struct vhost_async), 0, node);
+       if (!async) {
+               VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async metadata (qid: %d)\n",
+                               dev->ifname, queue_id);
                return -1;
        }
 
-       vq->it_pool = rte_malloc_socket(NULL,
-                       VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),
-                       RTE_CACHE_LINE_SIZE, vq->numa_node);
-       if (!vq->it_pool) {
-               vhost_free_async_mem(vq);
-               VHOST_LOG_CONFIG(ERR,
-                       "async register failed: cannot allocate memory for it_pool "
-                       "(vid %d, qid: %d)\n", vid, queue_id);
-               return -1;
+       async->pkts_info = rte_malloc_socket(NULL, vq->size * sizeof(struct async_inflight_info),
+                       RTE_CACHE_LINE_SIZE, node);
+       if (!async->pkts_info) {
+               VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async_pkts_info (qid: %d)\n",
+                               dev->ifname, queue_id);
+               goto out_free_async;
        }
 
-       vq->vec_pool = rte_malloc_socket(NULL,
-                       VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
-                       RTE_CACHE_LINE_SIZE, vq->numa_node);
-       if (!vq->vec_pool) {
-               vhost_free_async_mem(vq);
-               VHOST_LOG_CONFIG(ERR,
-                       "async register failed: cannot allocate memory for vec_pool "
-                       "(vid %d, qid: %d)\n", vid, queue_id);
-               return -1;
+       async->pkts_cmpl_flag = rte_zmalloc_socket(NULL, vq->size * sizeof(bool),
+                       RTE_CACHE_LINE_SIZE, node);
+       if (!async->pkts_cmpl_flag) {
+               VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async pkts_cmpl_flag (qid: %d)\n",
+                               dev->ifname, queue_id);
+               goto out_free_inflight;
        }
 
        if (vq_is_packed(dev)) {
-               vq->async_buffers_packed = rte_malloc_socket(NULL,
-                       vq->size * sizeof(struct vring_used_elem_packed),
-                       RTE_CACHE_LINE_SIZE, vq->numa_node);
-               if (!vq->async_buffers_packed) {
-                       vhost_free_async_mem(vq);
-                       VHOST_LOG_CONFIG(ERR,
-                               "async register failed: cannot allocate memory for async buffers "
-                               "(vid %d, qid: %d)\n", vid, queue_id);
-                       return -1;
+               async->buffers_packed = rte_malloc_socket(NULL,
+                               vq->size * sizeof(struct vring_used_elem_packed),
+                               RTE_CACHE_LINE_SIZE, node);
+               if (!async->buffers_packed) {
+                       VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async buffers (qid: %d)\n",
+                                       dev->ifname, queue_id);
+                       goto out_free_inflight;
                }
        } else {
-               vq->async_descs_split = rte_malloc_socket(NULL,
-                       vq->size * sizeof(struct vring_used_elem),
-                       RTE_CACHE_LINE_SIZE, vq->numa_node);
-               if (!vq->async_descs_split) {
-                       vhost_free_async_mem(vq);
-                       VHOST_LOG_CONFIG(ERR,
-                               "async register failed: cannot allocate memory for async descs "
-                               "(vid %d, qid: %d)\n", vid, queue_id);
-                       return -1;
+               async->descs_split = rte_malloc_socket(NULL,
+                               vq->size * sizeof(struct vring_used_elem),
+                               RTE_CACHE_LINE_SIZE, node);
+               if (!async->descs_split) {
+                       VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async descs (qid: %d)\n",
+                                       dev->ifname, queue_id);
+                       goto out_free_inflight;
                }
        }
 
-       vq->async_ops.check_completed_copies = ops->check_completed_copies;
-       vq->async_ops.transfer_data = ops->transfer_data;
-
-       vq->async_registered = true;
+       vq->async = async;
 
        return 0;
+out_free_inflight:
+       rte_free(async->pkts_info);
+       rte_free(async->pkts_cmpl_flag);
+out_free_async:
+       rte_free(async);
+       return -1;
 }
 
 int
-rte_vhost_async_channel_register(int vid, uint16_t queue_id,
-               struct rte_vhost_async_config config,
-               struct rte_vhost_async_channel_ops *ops)
+rte_vhost_async_channel_register(int vid, uint16_t queue_id)
 {
        struct vhost_virtqueue *vq;
        struct virtio_net *dev = get_device(vid);
        int ret;
 
-       if (dev == NULL || ops == NULL)
+       if (dev == NULL)
                return -1;
 
        if (queue_id >= VHOST_MAX_VRING)
@@ -1718,33 +1704,20 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,
        if (unlikely(vq == NULL || !dev->async_copy))
                return -1;
 
-       if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
-               VHOST_LOG_CONFIG(ERR,
-                       "async copy is not supported on non-inorder mode "
-                       "(vid %d, qid: %d)\n", vid, queue_id);
-               return -1;
-       }
-
-       if (unlikely(ops->check_completed_copies == NULL ||
-               ops->transfer_data == NULL))
-               return -1;
-
        rte_spinlock_lock(&vq->access_lock);
-       ret = async_channel_register(vid, queue_id, ops);
+       ret = async_channel_register(vid, queue_id);
        rte_spinlock_unlock(&vq->access_lock);
 
        return ret;
 }
 
 int
-rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
-               struct rte_vhost_async_config config,
-               struct rte_vhost_async_channel_ops *ops)
+rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id)
 {
        struct vhost_virtqueue *vq;
        struct virtio_net *dev = get_device(vid);
 
-       if (dev == NULL || ops == NULL)
+       if (dev == NULL)
                return -1;
 
        if (queue_id >= VHOST_MAX_VRING)
@@ -1755,18 +1728,7 @@ rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
        if (unlikely(vq == NULL || !dev->async_copy))
                return -1;
 
-       if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
-               VHOST_LOG_CONFIG(ERR,
-                       "async copy is not supported on non-inorder mode "
-                       "(vid %d, qid: %d)\n", vid, queue_id);
-               return -1;
-       }
-
-       if (unlikely(ops->check_completed_copies == NULL ||
-               ops->transfer_data == NULL))
-               return -1;
-
-       return async_channel_register(vid, queue_id, ops);
+       return async_channel_register(vid, queue_id);
 }
 
 int
@@ -1789,28 +1751,24 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
 
        ret = 0;
 
-       if (!vq->async_registered)
+       if (!vq->async)
                return ret;
 
        if (!rte_spinlock_trylock(&vq->access_lock)) {
-               VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
-                       "virt queue busy.\n");
+               VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel, virtqueue busy.\n",
+                               dev->ifname);
                return -1;
        }
 
-       if (vq->async_pkts_inflight_n) {
-               VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
-                       "async inflight packets must be completed before unregistration.\n");
+       if (vq->async->pkts_inflight_n) {
+               VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel.\n", dev->ifname);
+               VHOST_LOG_CONFIG(ERR, "(%s) inflight packets must be completed before unregistration.\n",
+                       dev->ifname);
                ret = -1;
                goto out;
        }
 
        vhost_free_async_mem(vq);
-
-       vq->async_ops.transfer_data = NULL;
-       vq->async_ops.check_completed_copies = NULL;
-       vq->async_registered = false;
-
 out:
        rte_spinlock_unlock(&vq->access_lock);
 
@@ -1834,25 +1792,85 @@ rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
        if (vq == NULL)
                return -1;
 
-       if (!vq->async_registered)
+       if (!vq->async)
                return 0;
 
-       if (vq->async_pkts_inflight_n) {
-               VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
-                       "async inflight packets must be completed before unregistration.\n");
+       if (vq->async->pkts_inflight_n) {
+               VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel.\n", dev->ifname);
+               VHOST_LOG_CONFIG(ERR, "(%s) inflight packets must be completed before unregistration.\n",
+                       dev->ifname);
                return -1;
        }
 
        vhost_free_async_mem(vq);
 
-       vq->async_ops.transfer_data = NULL;
-       vq->async_ops.check_completed_copies = NULL;
-       vq->async_registered = false;
+       return 0;
+}
+
+int
+rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
+{
+       struct rte_dma_info info;
+       void *pkts_cmpl_flag_addr;
+       uint16_t max_desc;
+
+       if (!rte_dma_is_valid(dma_id)) {
+               VHOST_LOG_CONFIG(ERR, "DMA %d is not found.\n", dma_id);
+               return -1;
+       }
+
+       rte_dma_info_get(dma_id, &info);
+       if (vchan_id >= info.max_vchans) {
+               VHOST_LOG_CONFIG(ERR, "Invalid DMA %d vChannel %u.\n", dma_id, vchan_id);
+               return -1;
+       }
+
+       if (!dma_copy_track[dma_id].vchans) {
+               struct async_dma_vchan_info *vchans;
+
+               vchans = rte_zmalloc(NULL, sizeof(struct async_dma_vchan_info) * info.max_vchans,
+                               RTE_CACHE_LINE_SIZE);
+               if (vchans == NULL) {
+                       VHOST_LOG_CONFIG(ERR, "Failed to allocate vchans for DMA %d vChannel %u.\n",
+                                       dma_id, vchan_id);
+                       return -1;
+               }
+
+               dma_copy_track[dma_id].vchans = vchans;
+       }
+
+       if (dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr) {
+               VHOST_LOG_CONFIG(INFO, "DMA %d vChannel %u already registered.\n", dma_id,
+                               vchan_id);
+               return 0;
+       }
+
+       max_desc = info.max_desc;
+       if (!rte_is_power_of_2(max_desc))
+               max_desc = rte_align32pow2(max_desc);
+
+       pkts_cmpl_flag_addr = rte_zmalloc(NULL, sizeof(bool *) * max_desc, RTE_CACHE_LINE_SIZE);
+       if (!pkts_cmpl_flag_addr) {
+               VHOST_LOG_CONFIG(ERR, "Failed to allocate pkts_cmpl_flag_addr for DMA %d "
+                               "vChannel %u.\n", dma_id, vchan_id);
+
+               if (dma_copy_track[dma_id].nr_vchans == 0) {
+                       rte_free(dma_copy_track[dma_id].vchans);
+                       dma_copy_track[dma_id].vchans = NULL;
+               }
+               return -1;
+       }
+
+       dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr = pkts_cmpl_flag_addr;
+       dma_copy_track[dma_id].vchans[vchan_id].ring_size = max_desc;
+       dma_copy_track[dma_id].vchans[vchan_id].ring_mask = max_desc - 1;
+       dma_copy_track[dma_id].nr_vchans++;
 
        return 0;
 }
 
-int rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
+int
+rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
 {
        struct vhost_virtqueue *vq;
        struct virtio_net *dev = get_device(vid);
@@ -1869,20 +1887,59 @@ int rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
        if (vq == NULL)
                return ret;
 
-       if (!vq->async_registered)
+       if (!vq->async)
                return ret;
 
        if (!rte_spinlock_trylock(&vq->access_lock)) {
-               VHOST_LOG_CONFIG(DEBUG, "Failed to check in-flight packets. "
-                       "virt queue busy.\n");
+               VHOST_LOG_CONFIG(DEBUG,
+                       "(%s) failed to check in-flight packets. virtqueue busy.\n",
+                       dev->ifname);
                return ret;
        }
 
-       ret = vq->async_pkts_inflight_n;
+       ret = vq->async->pkts_inflight_n;
        rte_spinlock_unlock(&vq->access_lock);
 
        return ret;
 }
 
+int
+rte_vhost_get_monitor_addr(int vid, uint16_t queue_id,
+               struct rte_vhost_power_monitor_cond *pmc)
+{
+       struct virtio_net *dev = get_device(vid);
+       struct vhost_virtqueue *vq;
+
+       if (dev == NULL)
+               return -1;
+       if (queue_id >= VHOST_MAX_VRING)
+               return -1;
+
+       vq = dev->virtqueue[queue_id];
+       if (vq == NULL)
+               return -1;
+
+       if (vq_is_packed(dev)) {
+               struct vring_packed_desc *desc;
+               desc = vq->desc_packed;
+               pmc->addr = &desc[vq->last_avail_idx].flags;
+               if (vq->avail_wrap_counter)
+                       pmc->val = VRING_DESC_F_AVAIL;
+               else
+                       pmc->val = VRING_DESC_F_USED;
+               pmc->mask = VRING_DESC_F_AVAIL | VRING_DESC_F_USED;
+               pmc->size = sizeof(desc[vq->last_avail_idx].flags);
+               pmc->match = 1;
+       } else {
+               pmc->addr = &vq->avail->idx;
+               pmc->val = vq->last_avail_idx & (vq->size - 1);
+               pmc->mask = vq->size - 1;
+               pmc->size = sizeof(vq->avail->idx);
+               pmc->match = 0;
+       }
+
+       return 0;
+}
+
 RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);
 RTE_LOG_REGISTER_SUFFIX(vhost_data_log_level, data, WARNING);
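
A minimal usage sketch for the reworked async API above, assuming a vhost port created with the RTE_VHOST_USER_ASYNC_COPY flag and an already-configured dmadev; dma_id, vchan_id, vid, queue_id and the helper names are placeholders, not part of this patch:

/* Sketch: bind a DMA vChannel to vhost, then register/unregister a
 * virtqueue for asynchronous copies using the new argument-free API. */
#include <rte_pause.h>
#include <rte_vhost.h>
#include <rte_vhost_async.h>

static int
setup_async_datapath(int vid, uint16_t queue_id, int16_t dma_id, uint16_t vchan_id)
{
	/* Bind the DMA vChannel to the vhost library once, before any
	 * virtqueue uses it for asynchronous copies. */
	if (rte_vhost_async_dma_configure(dma_id, vchan_id) < 0)
		return -1;

	/* Per-virtqueue registration no longer takes config/ops arguments;
	 * completion tracking is handled internally via pkts_cmpl_flag. */
	return rte_vhost_async_channel_register(vid, queue_id);
}

static int
teardown_async_datapath(int vid, uint16_t queue_id)
{
	/* Unregistration is refused while copies are still in flight, so
	 * drain first; rte_vhost_async_get_inflight() returns -1 when the
	 * virtqueue is busy and the count could not be read. */
	while (rte_vhost_async_get_inflight(vid, queue_id) > 0)
		rte_pause();

	return rte_vhost_async_channel_unregister(vid, queue_id);
}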