if (log_translate(dev, vq) < 0)
return -1;
- vq->access_ok = 1;
+ vq->access_ok = true;
return 0;
}
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_wr_lock(vq);
- vq->access_ok = 0;
+ vq->access_ok = false;
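+ /* Ring addresses must be re-translated before the next access. */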
vq->desc = NULL;
vq->avail = NULL;
vq->used = NULL;
rte_spinlock_lock(&vq->access_lock);
- if (unlikely(vq->enabled == 0 || vq->avail == NULL))
+ if (unlikely(!vq->enabled || vq->avail == NULL))
goto out;
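+ /* Volatile read: the guest updates avail->idx concurrently. */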
ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
struct vring_used *used;
struct vring_packed_desc_event *device_event;
};
- uint32_t size;
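+ /* 16 bits are enough: the Virtio spec caps queue size at 32768. */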
+ uint16_t size;
uint16_t last_avail_idx;
uint16_t last_used_idx;
#define VIRTIO_INVALID_EVENTFD (-1)
#define VIRTIO_UNINITIALIZED_EVENTFD (-2)
- int enabled;
- int access_ok;
- int ready;
- int notif_enable;
-#define VIRTIO_UNINITIALIZED_NOTIF (-1)
+ bool enabled;
+ bool access_ok;
+ bool ready;
rte_spinlock_t access_lock;
- /* Used to notify the guest (trigger interrupt) */
- int callfd;
- /* Currently unused as polling mode is enabled */
- int kickfd;
-
- /* Physical address of used ring, for logging */
- uint64_t log_guest_addr;
-
- /* inflight share memory info */
- union {
- struct rte_vhost_inflight_info_split *inflight_split;
- struct rte_vhost_inflight_info_packed *inflight_packed;
- };
- struct rte_vhost_resubmit_info *resubmit_inflight;
- uint64_t global_counter;
union {
struct vring_used_elem *shadow_used_split;
uint16_t shadow_aligned_idx;
/* Record packed ring first dequeue desc index */
uint16_t shadow_last_used_idx;
- struct vhost_vring_addr ring_addrs;
- struct batch_copy_elem *batch_copy_elems;
uint16_t batch_copy_nb_elems;
+ struct batch_copy_elem *batch_copy_elems;
bool used_wrap_counter;
bool avail_wrap_counter;
- struct log_cache_entry *log_cache;
- uint16_t log_cache_nb_elem;
+ uint16_t log_cache_nb_elem;
+ /* Physical address of used ring, for logging */
+ uint64_t log_guest_addr;
+ struct log_cache_entry *log_cache;
rte_rwlock_t iotlb_lock;
rte_rwlock_t iotlb_pending_lock;
struct rte_mempool *iotlb_pool;
TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
- int iotlb_cache_nr;
TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
+ int iotlb_cache_nr;
+
+ /* Used to notify the guest (trigger interrupt) */
+ int callfd;
+ /* Currently unused as polling mode is enabled */
+ int kickfd;
+
+ /* inflight shared memory info */
+ union {
+ struct rte_vhost_inflight_info_split *inflight_split;
+ struct rte_vhost_inflight_info_packed *inflight_packed;
+ };
+ struct rte_vhost_resubmit_info *resubmit_inflight;
+ uint64_t global_counter;
/* operation callbacks for async dma */
struct rte_vhost_async_channel_ops async_ops;
bool async_inorder;
bool async_registered;
uint16_t async_threshold;
+
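+ /* Kept as int: VIRTIO_UNINITIALIZED_NOTIF (-1) is a valid third state. */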
+ int notif_enable;
+#define VIRTIO_UNINITIALIZED_NOTIF (-1)
+
+ struct vhost_vring_addr ring_addrs;
} __rte_cache_aligned;
/* Virtio device status as per Virtio specification */
if (validate_msg_fds(msg, 0) != 0)
return RTE_VHOST_MSG_RESULT_ERR;
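+ /* The Virtio spec caps virtqueue size at 32768 entries. */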
+ if (msg->payload.state.num > 32768) {
+ VHOST_LOG_CONFIG(ERR, "invalid virtqueue size %u\n", msg->payload.state.num);
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+
vq->size = msg->payload.state.num;
/* VIRTIO 1.0, 2.4 Virtqueues says:
}
}
- if (vq->size > 32768) {
- VHOST_LOG_CONFIG(ERR,
- "invalid virtqueue size %u\n", vq->size);
- return RTE_VHOST_MSG_RESULT_ERR;
- }
-
if (vq_is_packed(dev)) {
if (vq->shadow_used_packed)
rte_free(vq->shadow_used_packed);
return dev;
}
- vq->access_ok = 1;
+ vq->access_ok = true;
return dev;
}
vq->last_avail_idx = vq->used->idx;
}
- vq->access_ok = 1;
+ vq->access_ok = true;
VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address desc: %p\n",
dev->vid, vq->desc);
vq = dev->virtqueue[file.index];
if (vq->ready) {
- vq->ready = 0;
+ vq->ready = false;
vhost_user_notify_queue_state(dev, file.index, 0);
}
* the SET_VRING_ENABLE message.
*/
if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
- vq->enabled = 1;
+ vq->enabled = true;
if (dev->notify_ops->vring_state_changed)
dev->notify_ops->vring_state_changed(
dev->vid, file.index, 1);
}
if (vq->ready) {
- vq->ready = 0;
+ vq->ready = false;
vhost_user_notify_queue_state(dev, file.index, 0);
}
int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
- int enable = (int)msg->payload.state.num;
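+ /* Any non-zero num requests enabling the ring. */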
+ bool enable = !!msg->payload.state.num;
int index = (int)msg->payload.state.index;
if (validate_msg_fds(msg, 0) != 0)
rte_spinlock_lock(&vq->access_lock);
- if (unlikely(vq->enabled == 0))
+ if (unlikely(!vq->enabled))
goto out_access_unlock;
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_lock(vq);
- if (unlikely(vq->access_ok == 0))
+ if (unlikely(!vq->access_ok))
if (unlikely(vring_translate(dev, vq) < 0))
goto out;
rte_spinlock_lock(&vq->access_lock);
- if (unlikely(vq->enabled == 0 || !vq->async_registered))
+ if (unlikely(!vq->enabled || !vq->async_registered))
goto out_access_unlock;
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_lock(vq);
- if (unlikely(vq->access_ok == 0))
+ if (unlikely(!vq->access_ok))
if (unlikely(vring_translate(dev, vq) < 0))
goto out;
if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
return 0;
- if (unlikely(vq->enabled == 0)) {
+ if (unlikely(!vq->enabled)) {
count = 0;
goto out_access_unlock;
}
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_lock(vq);
- if (unlikely(vq->access_ok == 0))
+ if (unlikely(!vq->access_ok))
if (unlikely(vring_translate(dev, vq) < 0)) {
count = 0;
goto out;