rte_free(dev);
}
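+/*
+ * Translate the guest addresses stored in vq->ring_addrs into host virtual
+ * addresses and mark the ring as safe to access. Translation through the
+ * IOTLB is only needed when VIRTIO_F_IOMMU_PLATFORM has been negotiated;
+ * otherwise the ring pointers set up by translate_ring_addresses() are kept.
+ */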
+int
+vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+	uint64_t size;
+
+	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
+		goto out;
+
+	size = sizeof(struct vring_desc) * vq->size;
+	vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
+						vq->ring_addrs.desc_user_addr,
+						size, VHOST_ACCESS_RW);
+	if (!vq->desc)
+		return -1;
+
+	size = sizeof(struct vring_avail);
+	size += sizeof(uint16_t) * vq->size;
+	vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
+						vq->ring_addrs.avail_user_addr,
+						size, VHOST_ACCESS_RW);
+	if (!vq->avail)
+		return -1;
+
+	size = sizeof(struct vring_used);
+	size += sizeof(struct vring_used_elem) * vq->size;
+	vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
+						vq->ring_addrs.used_user_addr,
+						size, VHOST_ACCESS_RW);
+	if (!vq->used)
+		return -1;
+
+out:
+	vq->access_ok = 1;
+
+	return 0;
+}
+
static void
init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
/* Currently unused as polling mode is enabled */
int kickfd;
int enabled;
+	/* Set to 1 once the ring addresses have been translated */
+	int access_ok;
/* Physical address of used ring, for logging */
uint64_t log_guest_addr;
uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint64_t iova, uint64_t size, uint8_t perm);
+int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
static __rte_always_inline uint64_t
vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
*/
memcpy(&vq->ring_addrs, addr, sizeof(*addr));
+	vq->desc = NULL;
+	vq->avail = NULL;
+	vq->used = NULL;
+
+	vq->access_ok = 0;
+
return 0;
}
vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
vq, addr->desc_user_addr, sizeof(struct vring_desc));
if (vq->desc == 0) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to find desc ring address.\n",
dev->vid);
- return NULL;
+ return dev;
}
dev = numa_realloc(dev, vq_index);
vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
vq, addr->avail_user_addr, sizeof(struct vring_avail));
if (vq->avail == 0) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to find avail ring address.\n",
dev->vid);
- return NULL;
+ return dev;
}
vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
vq, addr->used_user_addr, sizeof(struct vring_used));
if (vq->used == 0) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to find used ring address.\n",
dev->vid);
- return NULL;
+ return dev;
}
if (vq->last_used_idx != vq->used->idx) {
static int
vq_is_ready(struct vhost_virtqueue *vq)
{
- return vq && vq->desc &&
+ return vq && vq->desc && vq->avail && vq->used &&
vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
}
}
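+/*
+ * Check whether an IOTLB update message covers one of the vring's guest
+ * addresses (desc, avail or used), in which case the ring has to be
+ * (re)translated.
+ */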
static int
-vhost_user_iotlb_msg(struct virtio_net *dev, struct VhostUserMsg *msg)
+is_vring_iotlb_update(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
{
+	struct vhost_vring_addr *ra;
+	uint64_t start, end;
+
+	start = imsg->iova;
+	end = start + imsg->size;
+
+	ra = &vq->ring_addrs;
+	if (ra->desc_user_addr >= start && ra->desc_user_addr < end)
+		return 1;
+	if (ra->avail_user_addr >= start && ra->avail_user_addr < end)
+		return 1;
+	if (ra->used_user_addr >= start && ra->used_user_addr < end)
+		return 1;
+
+	return 0;
+}
+
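+/*
+ * The device is passed by reference because handling an IOTLB update may
+ * call translate_ring_addresses(), which can reallocate the virtio_net
+ * struct on a different NUMA node; the caller's pointer is updated here.
+ */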
+static int
+vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg)
+{
+	struct virtio_net *dev = *pdev;
struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
uint16_t i;
uint64_t vva;
vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
imsg->size, imsg->perm);
+
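+			/*
+			 * If this entry maps one of the ring addresses,
+			 * (re)translate the ring now.
+			 */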
+			if (is_vring_iotlb_update(vq, imsg))
+				*pdev = dev = translate_ring_addresses(dev, i);
}
break;
case VHOST_IOTLB_INVALIDATE:
}
ret = 0;
- RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
- vhost_message_str[msg.request]);
+	if (msg.request != VHOST_USER_IOTLB_MSG)
+		RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
+			vhost_message_str[msg.request]);
+	else
+		RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
+			vhost_message_str[msg.request]);
ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
if (ret < 0) {
break;
case VHOST_USER_IOTLB_MSG:
- ret = vhost_user_iotlb_msg(dev, &msg);
+ ret = vhost_user_iotlb_msg(&dev, &msg);
break;
default:
}
- /*
- * The virtio_net struct might have been reallocated on a different
- * NUMA node, so dev pointer might no more be valid.
- */
- dev = get_device(vid);
-
if (msg.flags & VHOST_USER_NEED_REPLY) {
msg.payload.u64 = !!ret;
msg.size = sizeof(msg.payload.u64);
if (unlikely(vq->enabled == 0))
return 0;
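+	/*
+	 * Hold the IOTLB read lock while the translated addresses are in use;
+	 * the rings themselves are translated lazily on first access.
+	 */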
+	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+		vhost_user_iotlb_rd_lock(vq);
+
+	if (unlikely(vq->access_ok == 0)) {
+		if (unlikely(vring_translate(dev, vq) < 0)) {
+			count = 0;
+			goto out;
+		}
+	}
+
avail_idx = *((volatile uint16_t *)&vq->avail->idx);
start_idx = vq->last_used_idx;
free_entries = avail_idx - start_idx;
count = RTE_MIN(count, free_entries);
count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
if (count == 0)
- return 0;
+ goto out;
LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n",
dev->vid, start_idx, start_idx + count);
}
rte_prefetch0(&vq->desc[desc_indexes[0]]);
-
- if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
- vhost_user_iotlb_rd_lock(vq);
-
for (i = 0; i < count; i++) {
uint16_t desc_idx = desc_indexes[i];
int err;
do_data_copy_enqueue(dev, vq);
- if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
- vhost_user_iotlb_rd_unlock(vq);
-
rte_smp_wmb();
*(volatile uint16_t *)&vq->used->idx += count;
if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
&& (vq->callfd >= 0))
eventfd_write(vq->callfd, (eventfd_t)1);
+out:
+	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+		vhost_user_iotlb_rd_unlock(vq);
+
return count;
}
if (unlikely(vq->enabled == 0))
return 0;
+	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+		vhost_user_iotlb_rd_lock(vq);
+
+	if (unlikely(vq->access_ok == 0))
+		if (unlikely(vring_translate(dev, vq) < 0))
+			goto out;
+
count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
if (count == 0)
- return 0;
+ goto out;
vq->batch_copy_nb_elems = 0;
vq->shadow_used_idx = 0;
avail_head = *((volatile uint16_t *)&vq->avail->idx);
-
- if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
- vhost_user_iotlb_rd_lock(vq);
-
for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
do_data_copy_enqueue(dev, vq);
- if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
- vhost_user_iotlb_rd_unlock(vq);
-
if (likely(vq->shadow_used_idx)) {
flush_shadow_used_ring(dev, vq);
eventfd_write(vq->callfd, (eventfd_t)1);
}
+out:
+	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+		vhost_user_iotlb_rd_unlock(vq);
+
return pkt_idx;
}
vq->batch_copy_nb_elems = 0;
+	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+		vhost_user_iotlb_rd_lock(vq);
+
+	if (unlikely(vq->access_ok == 0))
+		if (unlikely(vring_translate(dev, vq) < 0))
+			goto out;
+
if (unlikely(dev->dequeue_zero_copy)) {
struct zcopy_mbuf *zmbuf, *next;
int nr_updated = 0;
/* Prefetch descriptor index. */
rte_prefetch0(&vq->desc[desc_indexes[0]]);
-
- if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
- vhost_user_iotlb_rd_lock(vq);
-
for (i = 0; i < count; i++) {
struct vring_desc *desc;
uint16_t sz, idx;
TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
}
}
- if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
- vhost_user_iotlb_rd_unlock(vq);
-
vq->last_avail_idx += i;
if (likely(dev->dequeue_zero_copy == 0)) {
}
out:
+	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+		vhost_user_iotlb_rd_unlock(vq);
+
if (unlikely(rarp_mbuf != NULL)) {
/*
* Inject it to the head of "pkts" array, so that switch's mac