return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}
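/*
 * get_blk_size() above reports the backing file's block size (the
 * hugepage size for a hugetlbfs fd); vhost-user derives the mmap
 * alignment for guest memory regions from it. A minimal sketch of that
 * use; the helper name is illustrative, not the exact call site:
 */
#include <stdint.h>
#include <rte_common.h>

static int
align_region_size(int fd, uint64_t *mmap_size)
{
	uint64_t alignment = get_blk_size(fd);

	if (alignment == (uint64_t)-1)
		return -1;	/* fstat() failed on the region fd */

	*mmap_size = RTE_ALIGN_CEIL(*mmap_size, alignment);
	return 0;
}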
-/*
- * Reclaim all the outstanding zmbufs for a virtqueue.
- */
-static void
-drain_zmbuf_list(struct vhost_virtqueue *vq)
-{
- struct zcopy_mbuf *zmbuf, *next;
-
- for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
- zmbuf != NULL; zmbuf = next) {
- next = TAILQ_NEXT(zmbuf, next);
-
- while (!mbuf_is_consumed(zmbuf->mbuf))
- usleep(1000);
-
- TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
- restore_mbuf(zmbuf->mbuf);
- rte_pktmbuf_free(zmbuf->mbuf);
- put_zmbuf(zmbuf);
- vq->nr_zmbuf -= 1;
- }
-}
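/*
 * The drain loop above spins until the application releases every
 * zero-copy mbuf. A sketch of a consumption check, assuming the mbuf
 * reference count is the ownership signal; this mirrors what
 * mbuf_is_consumed() does but is illustrative, not the verbatim body:
 */
#include <stdbool.h>
#include <rte_mbuf.h>

static bool
zmbuf_chain_consumed(struct rte_mbuf *m)
{
	/* Walk the chain; a refcount above 1 means a reader still holds it. */
	while (m != NULL) {
		if (rte_mbuf_refcnt_read(m) > 1)
			return false;
		m = m->next;
	}
	return true;
}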
-
static void
free_mem_region(struct virtio_net *dev)
{
uint32_t i;
struct rte_vhost_mem_region *reg;
- struct vhost_virtqueue *vq;
if (!dev || !dev->mem)
return;
- if (dev->dequeue_zero_copy) {
- for (i = 0; i < dev->nr_vring; i++) {
- vq = dev->virtqueue[i];
- if (vq)
- drain_zmbuf_list(vq);
- }
- }
-
for (i = 0; i < dev->mem->nregions; i++) {
reg = &dev->mem->regions[i];
if (reg->host_user_addr) {
dev->features = features;
if (dev->features &
- ((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
+ ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
+ (1ULL << VIRTIO_F_VERSION_1) |
+ (1ULL << VIRTIO_F_RING_PACKED))) {
dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
} else {
dev->vhost_hlen = sizeof(struct virtio_net_hdr);
return RTE_VHOST_MSG_RESULT_ERR;
}
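/*
 * The hunk above widens the header-size check because a packed-ring
 * (VIRTIO 1.1) device always uses the mergeable-style header carrying
 * num_buffers, even without VIRTIO_NET_F_MRG_RXBUF. The selection
 * logic in isolation; the helper name is illustrative:
 */
#include <stddef.h>
#include <stdint.h>
#include <linux/virtio_net.h>
#include <linux/virtio_config.h>

static size_t
net_hdr_size(uint64_t features)
{
	if (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			(1ULL << VIRTIO_F_VERSION_1) |
			(1ULL << VIRTIO_F_RING_PACKED)))
		return sizeof(struct virtio_net_hdr_mrg_rxbuf);

	return sizeof(struct virtio_net_hdr);
}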
- if (dev->dequeue_zero_copy) {
- vq->nr_zmbuf = 0;
- vq->last_zmbuf_idx = 0;
- vq->zmbuf_size = vq->size;
- if (vq->zmbufs)
- rte_free(vq->zmbufs);
- vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size *
- sizeof(struct zcopy_mbuf), 0);
- if (vq->zmbufs == NULL) {
- VHOST_LOG_CONFIG(WARNING,
- "failed to allocate mem for zero copy; "
- "zero copy is force disabled\n");
- dev->dequeue_zero_copy = 0;
- }
- TAILQ_INIT(&vq->zmbuf_list);
- }
-
if (vq_is_packed(dev)) {
if (vq->shadow_used_packed)
rte_free(vq->shadow_used_packed);
int oldnode, newnode;
struct virtio_net *old_dev;
struct vhost_virtqueue *old_vq, *vq;
- struct zcopy_mbuf *new_zmbuf;
struct vring_used_elem *new_shadow_used_split;
struct vring_used_elem_packed *new_shadow_used_packed;
struct batch_copy_elem *new_batch_copy_elems;
return dev;
memcpy(vq, old_vq, sizeof(*vq));
- TAILQ_INIT(&vq->zmbuf_list);
-
- if (dev->dequeue_zero_copy) {
- new_zmbuf = rte_malloc_socket(NULL, vq->zmbuf_size *
- sizeof(struct zcopy_mbuf), 0, newnode);
- if (new_zmbuf) {
- rte_free(vq->zmbufs);
- vq->zmbufs = new_zmbuf;
- }
- }
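/*
 * numa_realloc() moves per-virtqueue arrays onto the node the device
 * now runs on; the deleted hunk applied this to the zero-copy mbuf
 * array. The surviving pattern, sketched generically (names are
 * illustrative; contents are rebuilt by the datapath, so no copy):
 */
#include <stddef.h>
#include <rte_malloc.h>

static void *
migrate_array(void *old, size_t bytes, int newnode)
{
	void *new_arr = rte_malloc_socket(NULL, bytes, 0, newnode);

	/* On failure keep the wrong-node array; it is still functional. */
	if (new_arr == NULL)
		return old;

	rte_free(old);
	return new_arr;
}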
if (vq_is_packed(dev)) {
new_shadow_used_packed = rte_malloc_socket(NULL,
goto err_mmap;
}
- populate = (dev->dequeue_zero_copy || dev->async_copy) ?
- MAP_POPULATE : 0;
+ populate = dev->async_copy ? MAP_POPULATE : 0;
mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
MAP_SHARED | populate, fd, 0);
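/*
 * MAP_POPULATE pre-faults the mapping so a DMA engine used by the
 * async datapath never touches an unbacked page; with dequeue
 * zero-copy gone, async copy is the only remaining user. A standalone
 * illustration of the flag, not the call above:
 */
#include <stddef.h>
#include <sys/mman.h>

static void *
map_prefaulted(size_t len, int fd, int prefault)
{
	int flags = MAP_SHARED | (prefault ? MAP_POPULATE : 0);

	/* With MAP_POPULATE the kernel walks the range at mmap() time. */
	return mmap(NULL, len, PROT_READ | PROT_WRITE, flags, fd, 0);
}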
reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
mmap_offset;
- if (dev->dequeue_zero_copy || dev->async_copy)
+ if (dev->async_copy)
if (add_guest_pages(dev, reg, alignment) < 0) {
VHOST_LOG_CONFIG(ERR,
"adding guest pages to region %u failed.\n",
vq->enabled;
}
-#define VIRTIO_DEV_NUM_VQS_TO_BE_READY 2u
+#define VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY 2u
static int
virtio_is_ready(struct virtio_net *dev)
{
struct vhost_virtqueue *vq;
- uint32_t i;
+ uint32_t i, nr_vring = dev->nr_vring;
if (dev->flags & VIRTIO_DEV_READY)
return 1;
- if (dev->nr_vring < VIRTIO_DEV_NUM_VQS_TO_BE_READY)
+ if (!dev->nr_vring)
return 0;
- for (i = 0; i < VIRTIO_DEV_NUM_VQS_TO_BE_READY; i++) {
+ if (dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) {
+ nr_vring = VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY;
+
+ if (dev->nr_vring < nr_vring)
+ return 0;
+ }
+
+ for (i = 0; i < nr_vring; i++) {
vq = dev->virtqueue[i];
if (!vq_is_ready(dev, vq))
 return 0;
}
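/*
 * Each iteration above consults vq_is_ready() (not shown in this
 * hunk). A sketch of the kind of checks involved, assuming only the
 * split-ring fields from vhost.h; the real helper also covers packed
 * rings, the kick/call eventfds and vq->enabled:
 */
static int
vq_ready_sketch(struct vhost_virtqueue *vq)
{
	if (vq == NULL)
		return 0;

	/* All three split-ring addresses must have been translated. */
	return vq->desc != NULL && vq->avail != NULL && vq->used != NULL;
}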
-static void
-free_zmbufs(struct vhost_virtqueue *vq)
-{
- drain_zmbuf_list(vq);
-
- rte_free(vq->zmbufs);
- vq->zmbufs = NULL;
-}
-
/*
* when virtio is stopped, qemu will send us the GET_VRING_BASE message.
*/
vq->signalled_used_valid = false;
- if (dev->dequeue_zero_copy)
- free_zmbufs(vq);
if (vq_is_packed(dev)) {
rte_free(vq->shadow_used_packed);
vq->shadow_used_packed = NULL;
vq->shadow_used_split = NULL;
if (vq->async_pkts_pending)
rte_free(vq->async_pkts_pending);
- if (vq->async_pending_info)
- rte_free(vq->async_pending_info);
+ if (vq->async_pkts_info)
+ rte_free(vq->async_pkts_info);
vq->async_pkts_pending = NULL;
- vq->async_pending_info = NULL;
+ vq->async_pkts_info = NULL;
}
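/*
 * rte_free(NULL) is documented as a no-op, so the "if (ptr)" guards
 * above are defensive rather than required. An equivalent guard-free
 * teardown of the renamed fields (illustrative helper):
 */
static void
async_arrays_teardown(struct vhost_virtqueue *vq)
{
	rte_free(vq->async_pkts_pending);
	rte_free(vq->async_pkts_info);
	vq->async_pkts_pending = NULL;
	vq->async_pkts_info = NULL;
}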
rte_free(vq->batch_copy_elems);
"set queue enable: %d to qp idx: %d\n",
enable, index);
- if (!enable && dev->virtqueue[index]->async_registered) {
+ if (enable && dev->virtqueue[index]->async_registered) {
if (dev->virtqueue[index]->async_pkts_inflight_n) {
- VHOST_LOG_CONFIG(ERR, "failed to disable vring. "
+ VHOST_LOG_CONFIG(ERR, "failed to enable vring. "
"async inflight packets must be completed first\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
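/*
 * The check above refuses to flip the ring while async copies are in
 * flight. An application can drain them first through the public API;
 * a usage sketch where DRAIN_BURST and the loop policy are assumptions:
 */
#include <rte_mbuf.h>
#include <rte_vhost_async.h>

#define DRAIN_BURST 32

static void
drain_async_inflight(int vid, uint16_t queue_id)
{
	struct rte_mbuf *pkts[DRAIN_BURST];
	uint16_t n;

	/* Reap completed copies until nothing more is returned. */
	do {
		n = rte_vhost_poll_enqueue_completed(vid, queue_id,
				pkts, DRAIN_BURST);
		rte_pktmbuf_free_bulk(pkts, n);
	} while (n > 0);
}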
}
- /* On disable, rings have to be stopped being processed. */
- if (!enable && dev->dequeue_zero_copy)
- drain_zmbuf_list(dev->virtqueue[index]);
-
dev->virtqueue[index]->enabled = enable;
return RTE_VHOST_MSG_RESULT_OK;
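/*
 * The enabled flag set above reaches applications through the
 * vring_state_changed() member of struct vhost_device_ops. A sketch of
 * a handler; the body and app_quiesce_queue() are hypothetical:
 */
static int
on_vring_state_changed(int vid, uint16_t queue_id, int enable)
{
	/* Stop polling a queue the guest has disabled. */
	if (!enable)
		app_quiesce_queue(vid, queue_id);

	return 0;
}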