static void
vhost_free_async_mem(struct vhost_virtqueue *vq)
{
- rte_free(vq->async_pkts_info);
+ if (!vq->async)
+ return;
- rte_free(vq->async_buffers_packed);
- vq->async_buffers_packed = NULL;
- rte_free(vq->async_descs_split);
- vq->async_descs_split = NULL;
+ rte_free(vq->async->pkts_info);
- rte_free(vq->it_pool);
- rte_free(vq->vec_pool);
+ rte_free(vq->async->buffers_packed);
+ vq->async->buffers_packed = NULL;
+ rte_free(vq->async->descs_split);
+ vq->async->descs_split = NULL;
- vq->async_pkts_info = NULL;
- vq->it_pool = NULL;
- vq->vec_pool = NULL;
+ rte_free(vq->async);
+ vq->async = NULL;
}
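+ /*
+  * Reference: a sketch of the consolidated metadata now hanging off
+  * vq->async, inferred from the fields used in this patch (the exact
+  * definition lives in vhost.h and may differ):
+  *
+  *	struct vhost_async {
+  *		struct rte_vhost_async_channel_ops ops;
+  *
+  *		struct rte_vhost_iov_iter it_pool[VHOST_MAX_ASYNC_IT];
+  *		struct iovec vec_pool[VHOST_MAX_ASYNC_VEC];
+  *
+  *		struct async_inflight_info *pkts_info;
+  *		uint16_t pkts_idx;
+  *		uint16_t pkts_inflight_n;
+  *		uint16_t last_pkts_n;
+  *		union {
+  *			struct vring_used_elem *descs_split;
+  *			struct vring_used_elem_packed *buffers_packed;
+  *		};
+  *		union {
+  *			uint16_t desc_idx_split;
+  *			uint16_t buffer_idx_packed;
+  *		};
+  *		union {
+  *			uint16_t last_desc_idx_split;
+  *			uint16_t last_buffer_idx_packed;
+  *		};
+  *	};
+  */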
int
{
struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
+ struct vhost_async *async;
+ int node = vq->numa_node;
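+ /* allocate all async metadata on the virtqueue's NUMA node */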
- if (unlikely(vq->async_registered)) {
+ if (unlikely(vq->async)) {
VHOST_LOG_CONFIG(ERR,
- "async register failed: channel already registered "
- "(vid %d, qid: %d)\n", vid, queue_id);
+ "async register failed: already registered (vid %d, qid: %d)\n",
+ vid, queue_id);
return -1;
}
- vq->async_pkts_info = rte_malloc_socket(NULL,
- vq->size * sizeof(struct async_inflight_info),
- RTE_CACHE_LINE_SIZE, vq->numa_node);
- if (!vq->async_pkts_info) {
- vhost_free_async_mem(vq);
- VHOST_LOG_CONFIG(ERR,
- "async register failed: cannot allocate memory for async_pkts_info "
- "(vid %d, qid: %d)\n", vid, queue_id);
+ async = rte_zmalloc_socket(NULL, sizeof(struct vhost_async), 0, node);
+ if (!async) {
+ VHOST_LOG_CONFIG(ERR, "failed to allocate async metadata (vid %d, qid: %d)\n",
+ vid, queue_id);
return -1;
}
- vq->it_pool = rte_malloc_socket(NULL,
- VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),
- RTE_CACHE_LINE_SIZE, vq->numa_node);
- if (!vq->it_pool) {
- vhost_free_async_mem(vq);
- VHOST_LOG_CONFIG(ERR,
- "async register failed: cannot allocate memory for it_pool "
- "(vid %d, qid: %d)\n", vid, queue_id);
- return -1;
- }
-
- vq->vec_pool = rte_malloc_socket(NULL,
- VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
- RTE_CACHE_LINE_SIZE, vq->numa_node);
- if (!vq->vec_pool) {
- vhost_free_async_mem(vq);
- VHOST_LOG_CONFIG(ERR,
- "async register failed: cannot allocate memory for vec_pool "
- "(vid %d, qid: %d)\n", vid, queue_id);
- return -1;
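+ /*
+  * Note: it_pool and vec_pool are not allocated separately anymore;
+  * assuming they became inline arrays in struct vhost_async, the zmalloc
+  * above covers them and vhost_free_async_mem() frees them together with
+  * the struct.
+  */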
+ async->pkts_info = rte_malloc_socket(NULL, vq->size * sizeof(struct async_inflight_info),
+ RTE_CACHE_LINE_SIZE, node);
+ if (!async->pkts_info) {
+ VHOST_LOG_CONFIG(ERR, "failed to allocate async_pkts_info (vid %d, qid: %d)\n",
+ vid, queue_id);
+ goto out_free_async;
}
if (vq_is_packed(dev)) {
- vq->async_buffers_packed = rte_malloc_socket(NULL,
- vq->size * sizeof(struct vring_used_elem_packed),
- RTE_CACHE_LINE_SIZE, vq->numa_node);
- if (!vq->async_buffers_packed) {
- vhost_free_async_mem(vq);
- VHOST_LOG_CONFIG(ERR,
- "async register failed: cannot allocate memory for async buffers "
- "(vid %d, qid: %d)\n", vid, queue_id);
- return -1;
+ async->buffers_packed = rte_malloc_socket(NULL,
+ vq->size * sizeof(struct vring_used_elem_packed),
+ RTE_CACHE_LINE_SIZE, node);
+ if (!async->buffers_packed) {
+ VHOST_LOG_CONFIG(ERR, "failed to allocate async buffers (vid %d, qid: %d)\n",
+ vid, queue_id);
+ goto out_free_inflight;
}
} else {
- vq->async_descs_split = rte_malloc_socket(NULL,
- vq->size * sizeof(struct vring_used_elem),
- RTE_CACHE_LINE_SIZE, vq->numa_node);
- if (!vq->async_descs_split) {
- vhost_free_async_mem(vq);
- VHOST_LOG_CONFIG(ERR,
- "async register failed: cannot allocate memory for async descs "
- "(vid %d, qid: %d)\n", vid, queue_id);
- return -1;
+ async->descs_split = rte_malloc_socket(NULL,
+ vq->size * sizeof(struct vring_used_elem),
+ RTE_CACHE_LINE_SIZE, node);
+ if (!async->descs_split) {
+ VHOST_LOG_CONFIG(ERR, "failed to allocate async descs (vid %d, qid: %d)\n",
+ vid, queue_id);
+ goto out_free_inflight;
}
}
- vq->async_ops.check_completed_copies = ops->check_completed_copies;
- vq->async_ops.transfer_data = ops->transfer_data;
+ async->ops.check_completed_copies = ops->check_completed_copies;
+ async->ops.transfer_data = ops->transfer_data;
- vq->async_registered = true;
+ vq->async = async;
return 0;
+out_free_inflight:
+ rte_free(async->pkts_info);
+out_free_async:
+ rte_free(async);
+
+ return -1;
}
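+ /*
+  * Usage sketch (hypothetical application code, not part of this patch):
+  * a successful registration is now observable as a non-NULL vq->async.
+  *
+  *	static struct rte_vhost_async_channel_ops my_ops = {
+  *		.transfer_data = my_transfer_data,
+  *		.check_completed_copies = my_check_completed_copies,
+  *	};
+  *
+  *	if (rte_vhost_async_channel_register(vid, queue_id, ..., &my_ops) < 0)
+  *		rte_exit(EXIT_FAILURE, "async channel register failed\n");
+  *
+  * The elided arguments differ across DPDK releases; see rte_vhost_async.h
+  * for the exact prototype.
+  */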
int
ret = 0;
- if (!vq->async_registered)
+ if (!vq->async)
return ret;
if (!rte_spinlock_trylock(&vq->access_lock)) {
return -1;
}
- if (vq->async_pkts_inflight_n) {
+ if (vq->async->pkts_inflight_n) {
VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
"async inflight packets must be completed before unregistration.\n");
ret = -1;
goto out;
}
vhost_free_async_mem(vq);
-
- vq->async_ops.transfer_data = NULL;
- vq->async_ops.check_completed_copies = NULL;
- vq->async_registered = false;
-
out:
rte_spinlock_unlock(&vq->access_lock);
if (vq == NULL)
return -1;
- if (!vq->async_registered)
+ if (!vq->async)
return 0;
- if (vq->async_pkts_inflight_n) {
+ if (vq->async->pkts_inflight_n) {
VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
"async inflight packets must be completed before unregistration.\n");
return -1;
}
vhost_free_async_mem(vq);
- vq->async_ops.transfer_data = NULL;
- vq->async_ops.check_completed_copies = NULL;
- vq->async_registered = false;
-
return 0;
}
if (vq == NULL)
return ret;
- if (!vq->async_registered)
+ if (!vq->async)
return ret;
if (!rte_spinlock_trylock(&vq->access_lock)) {
return ret;
}
- ret = vq->async_pkts_inflight_n;
+ ret = vq->async->pkts_inflight_n;
rte_spinlock_unlock(&vq->access_lock);
return ret;
uint16_t num_buffers;
uint16_t avail_head;
- struct rte_vhost_iov_iter *it_pool = vq->it_pool;
- struct iovec *vec_pool = vq->vec_pool;
+ struct vhost_async *async = vq->async;
+ struct rte_vhost_iov_iter *it_pool = async->it_pool;
+ struct iovec *vec_pool = async->vec_pool;
struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
struct iovec *src_iovec = vec_pool;
struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
- struct async_inflight_info *pkts_info = vq->async_pkts_info;
+ struct async_inflight_info *pkts_info = async->pkts_info;
uint32_t n_pkts = 0, pkt_err = 0;
int32_t n_xfer;
uint16_t segs_await = 0;
async_fill_desc(&tdes[pkt_burst_idx++], &it_pool[it_idx],
&it_pool[it_idx + 1]);
- slot_idx = (vq->async_pkts_idx + pkt_idx) & (vq->size - 1);
+ slot_idx = (async->pkts_idx + pkt_idx) & (vq->size - 1);
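+ /*
+  * Masking with (vq->size - 1) works because split virtqueue sizes are
+  * powers of two; the packed-ring path below uses '%' since packed ring
+  * sizes need not be.
+  */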
pkts_info[slot_idx].descs = num_buffers;
pkts_info[slot_idx].mbuf = pkts[pkt_idx];
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
BUF_VECTOR_MAX))) {
- n_xfer = vq->async_ops.transfer_data(dev->vid,
+ n_xfer = async->ops.transfer_data(dev->vid,
queue_id, tdes, 0, pkt_burst_idx);
if (likely(n_xfer >= 0)) {
n_pkts = n_xfer;
}
if (pkt_burst_idx) {
- n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ n_xfer = async->ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
if (likely(n_xfer >= 0)) {
n_pkts = n_xfer;
} else {
/* keep used descriptors */
if (likely(vq->shadow_used_idx)) {
- uint16_t to = vq->async_desc_idx_split & (vq->size - 1);
+ uint16_t to = async->desc_idx_split & (vq->size - 1);
store_dma_desc_info_split(vq->shadow_used_split,
- vq->async_descs_split, vq->size, 0, to,
+ async->descs_split, vq->size, 0, to,
vq->shadow_used_idx);
- vq->async_desc_idx_split += vq->shadow_used_idx;
- vq->async_pkts_idx += pkt_idx;
- vq->async_pkts_inflight_n += pkt_idx;
+ async->desc_idx_split += vq->shadow_used_idx;
+ async->pkts_idx += pkt_idx;
+ async->pkts_inflight_n += pkt_idx;
vq->shadow_used_idx = 0;
}
{
uint16_t descs_err = 0;
uint16_t buffers_err = 0;
- struct async_inflight_info *pkts_info = vq->async_pkts_info;
+ struct async_inflight_info *pkts_info = vq->async->pkts_info;
*pkt_idx -= nr_err;
/* calculate the sum of buffers and descs of DMA-error packets. */
uint16_t num_buffers;
uint16_t num_descs;
- struct rte_vhost_iov_iter *it_pool = vq->it_pool;
- struct iovec *vec_pool = vq->vec_pool;
+ struct vhost_async *async = vq->async;
+ struct rte_vhost_iov_iter *it_pool = async->it_pool;
+ struct iovec *vec_pool = async->vec_pool;
struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
struct iovec *src_iovec = vec_pool;
struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
- struct async_inflight_info *pkts_info = vq->async_pkts_info;
+ struct async_inflight_info *pkts_info = async->pkts_info;
uint32_t n_pkts = 0, pkt_err = 0;
uint16_t slot_idx = 0;
uint16_t segs_await = 0;
&it_pool[it_idx], &it_pool[it_idx + 1]) < 0))
break;
- slot_idx = (vq->async_pkts_idx + pkt_idx) % vq->size;
+ slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
async_fill_desc(&tdes[pkt_burst_idx++], &it_pool[it_idx],
&it_pool[it_idx + 1]);
*/
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
- n_xfer = vq->async_ops.transfer_data(dev->vid,
+ n_xfer = async->ops.transfer_data(dev->vid,
queue_id, tdes, 0, pkt_burst_idx);
if (likely(n_xfer >= 0)) {
n_pkts = n_xfer;
} while (pkt_idx < count);
if (pkt_burst_idx) {
- n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ n_xfer = async->ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
if (likely(n_xfer >= 0)) {
n_pkts = n_xfer;
} else {
if (likely(vq->shadow_used_idx)) {
/* keep used descriptors. */
- store_dma_desc_info_packed(vq->shadow_used_packed, vq->async_buffers_packed,
- vq->size, 0, vq->async_buffer_idx_packed,
+ store_dma_desc_info_packed(vq->shadow_used_packed, async->buffers_packed,
+ vq->size, 0, async->buffer_idx_packed,
vq->shadow_used_idx);
- vq->async_buffer_idx_packed += vq->shadow_used_idx;
- if (vq->async_buffer_idx_packed >= vq->size)
- vq->async_buffer_idx_packed -= vq->size;
+ async->buffer_idx_packed += vq->shadow_used_idx;
+ if (async->buffer_idx_packed >= vq->size)
+ async->buffer_idx_packed -= vq->size;
- vq->async_pkts_idx += pkt_idx;
- if (vq->async_pkts_idx >= vq->size)
- vq->async_pkts_idx -= vq->size;
+ async->pkts_idx += pkt_idx;
+ if (async->pkts_idx >= vq->size)
+ async->pkts_idx -= vq->size;
vq->shadow_used_idx = 0;
- vq->async_pkts_inflight_n += pkt_idx;
+ async->pkts_inflight_n += pkt_idx;
}
return pkt_idx;
static __rte_always_inline void
write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
{
+ struct vhost_async *async = vq->async;
uint16_t nr_left = n_descs;
uint16_t nr_copy;
uint16_t to, from;
do {
- from = vq->last_async_desc_idx_split & (vq->size - 1);
+ from = async->last_desc_idx_split & (vq->size - 1);
nr_copy = nr_left + from <= vq->size ? nr_left : vq->size - from;
to = vq->last_used_idx & (vq->size - 1);
if (to + nr_copy <= vq->size) {
- rte_memcpy(&vq->used->ring[to], &vq->async_descs_split[from],
+ rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
nr_copy * sizeof(struct vring_used_elem));
} else {
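+ /* used ring wraps past the end: copy the tail first, then the remainder from slot 0 */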
uint16_t size = vq->size - to;
- rte_memcpy(&vq->used->ring[to], &vq->async_descs_split[from],
+ rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
size * sizeof(struct vring_used_elem));
- rte_memcpy(&vq->used->ring[0], &vq->async_descs_split[from + size],
+ rte_memcpy(&vq->used->ring[0], &async->descs_split[from + size],
(nr_copy - size) * sizeof(struct vring_used_elem));
}
- vq->last_async_desc_idx_split += nr_copy;
+ async->last_desc_idx_split += nr_copy;
vq->last_used_idx += nr_copy;
nr_left -= nr_copy;
} while (nr_left > 0);
write_back_completed_descs_packed(struct vhost_virtqueue *vq,
uint16_t n_buffers)
{
+ struct vhost_async *async = vq->async;
uint16_t nr_left = n_buffers;
uint16_t from, to;
do {
- from = vq->last_async_buffer_idx_packed;
+ from = async->last_buffer_idx_packed;
to = (from + nr_left) % vq->size;
if (to > from) {
- vhost_update_used_packed(vq, vq->async_buffers_packed + from, to - from);
- vq->last_async_buffer_idx_packed += nr_left;
+ vhost_update_used_packed(vq, async->buffers_packed + from, to - from);
+ async->last_buffer_idx_packed += nr_left;
nr_left = 0;
} else {
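+ /* shadow buffers wrap: flush entries up to the ring end; the next loop iteration handles the rest */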
- vhost_update_used_packed(vq, vq->async_buffers_packed + from,
+ vhost_update_used_packed(vq, async->buffers_packed + from,
vq->size - from);
- vq->last_async_buffer_idx_packed = 0;
+ async->last_buffer_idx_packed = 0;
nr_left -= vq->size - from;
}
} while (nr_left > 0);
struct rte_mbuf **pkts, uint16_t count)
{
struct vhost_virtqueue *vq;
+ struct vhost_async *async;
struct async_inflight_info *pkts_info;
int32_t n_cpl;
uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
uint16_t from, i;
vq = dev->virtqueue[queue_id];
- pkts_idx = vq->async_pkts_idx % vq->size;
- pkts_info = vq->async_pkts_info;
+ async = vq->async;
+ pkts_idx = async->pkts_idx % vq->size;
+ pkts_info = async->pkts_info;
vq_size = vq->size;
start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
- vq_size, vq->async_pkts_inflight_n);
+ vq_size, async->pkts_inflight_n);
- if (count > vq->async_last_pkts_n) {
- n_cpl = vq->async_ops.check_completed_copies(dev->vid,
- queue_id, 0, count - vq->async_last_pkts_n);
+ if (count > async->last_pkts_n) {
+ n_cpl = async->ops.check_completed_copies(dev->vid,
+ queue_id, 0, count - async->last_pkts_n);
if (likely(n_cpl >= 0)) {
n_pkts_cpl = n_cpl;
} else {
}
}
- n_pkts_cpl += vq->async_last_pkts_n;
+ n_pkts_cpl += async->last_pkts_n;
n_pkts_put = RTE_MIN(n_pkts_cpl, count);
if (unlikely(n_pkts_put == 0)) {
- vq->async_last_pkts_n = n_pkts_cpl;
+ async->last_pkts_n = n_pkts_cpl;
return 0;
}
pkts[i] = pkts_info[from].mbuf;
}
}
- vq->async_last_pkts_n = n_pkts_cpl - n_pkts_put;
- vq->async_pkts_inflight_n -= n_pkts_put;
+ async->last_pkts_n = n_pkts_cpl - n_pkts_put;
+ async->pkts_inflight_n -= n_pkts_put;
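+ /*
+  * Completions polled from the channel beyond what the caller consumed
+  * are banked in last_pkts_n and returned by the next poll, so none are
+  * lost.
+  */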
if (likely(vq->enabled && vq->access_ok)) {
if (vq_is_packed(dev)) {
}
} else {
if (vq_is_packed(dev)) {
- vq->last_async_buffer_idx_packed += n_buffers;
- if (vq->last_async_buffer_idx_packed >= vq->size)
- vq->last_async_buffer_idx_packed -= vq->size;
+ async->last_buffer_idx_packed += n_buffers;
+ if (async->last_buffer_idx_packed >= vq->size)
+ async->last_buffer_idx_packed -= vq->size;
} else {
- vq->last_async_desc_idx_split += n_descs;
+ async->last_desc_idx_split += n_descs;
}
}
vq = dev->virtqueue[queue_id];
- if (unlikely(!vq->async_registered)) {
+ if (unlikely(!vq->async)) {
VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
dev->vid, __func__, queue_id);
return 0;
vq = dev->virtqueue[queue_id];
- if (unlikely(!vq->async_registered)) {
+ if (unlikely(!vq->async)) {
VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
dev->vid, __func__, queue_id);
return 0;
rte_spinlock_lock(&vq->access_lock);
- if (unlikely(!vq->enabled || !vq->async_registered))
+ if (unlikely(!vq->enabled || !vq->async))
goto out_access_unlock;
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))