}
void
-free_vq(struct vhost_virtqueue *vq)
+free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
- rte_free(vq->shadow_used_ring);
+ if (vq_is_packed(dev))
+ rte_free(vq->shadow_used_packed);
+ else
+ rte_free(vq->shadow_used_split);
rte_free(vq->batch_copy_elems);
rte_mempool_free(vq->iotlb_pool);
rte_free(vq);
uint32_t i;
for (i = 0; i < dev->nr_vring; i++)
- free_vq(dev->virtqueue[i]);
+ free_vq(dev, dev->virtqueue[i]);
rte_free(dev);
}
unsigned long val;
};
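+/*
+ * Shadow used element for packed virtqueues. Besides the buffer id and
+ * written length, it records how many descriptor slots the buffer spans
+ * ('count'), since packed used entries are written back into the
+ * descriptor ring itself.
+ */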
+struct vring_used_elem_packed {
+ uint16_t id;
+ uint32_t len;
+ uint32_t count;
+};
+
/**
* Structure contains variables relevant to RX/TX virtqueues.
*/
struct zcopy_mbuf *zmbufs;
struct zcopy_mbuf_list zmbuf_list;
- struct vring_used_elem *shadow_used_ring;
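+ /* Only one member is valid per virtqueue, selected at allocation
+  * time depending on whether the ring is split or packed.
+  */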
+ union {
+ struct vring_used_elem *shadow_used_split;
+ struct vring_used_elem_packed *shadow_used_packed;
+ };
uint16_t shadow_used_idx;
struct vhost_vring_addr ring_addrs;
void vhost_destroy_device_notify(struct virtio_net *dev);
void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
-void free_vq(struct vhost_virtqueue *vq);
+void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);
int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
dev->virtqueue[dev->nr_vring] = NULL;
cleanup_vq(vq, 1);
- free_vq(vq);
+ free_vq(dev, vq);
}
}
TAILQ_INIT(&vq->zmbuf_list);
}
- vq->shadow_used_ring = rte_malloc(NULL,
+ if (vq_is_packed(dev)) {
+ vq->shadow_used_packed = rte_malloc(NULL,
+ vq->size *
+ sizeof(struct vring_used_elem_packed),
+ RTE_CACHE_LINE_SIZE);
+ if (!vq->shadow_used_packed) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to allocate memory for shadow used ring.\n");
+ return -1;
+ }
+ } else {
+ vq->shadow_used_split = rte_malloc(NULL,
vq->size * sizeof(struct vring_used_elem),
RTE_CACHE_LINE_SIZE);
- if (!vq->shadow_used_ring) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "failed to allocate memory for shadow used ring.\n");
- return -1;
+ if (!vq->shadow_used_split) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to allocate memory for shadow used ring.\n");
+ return -1;
+ }
}
vq->batch_copy_elems = rte_malloc(NULL,
struct virtio_net *old_dev;
struct vhost_virtqueue *old_vq, *vq;
struct zcopy_mbuf *new_zmbuf;
- struct vring_used_elem *new_shadow_used_ring;
+ struct vring_used_elem *new_shadow_used_split;
+ struct vring_used_elem_packed *new_shadow_used_packed;
struct batch_copy_elem *new_batch_copy_elems;
int ret;
vq->zmbufs = new_zmbuf;
}
- new_shadow_used_ring = rte_malloc_socket(NULL,
- vq->size * sizeof(struct vring_used_elem),
- RTE_CACHE_LINE_SIZE,
- newnode);
- if (new_shadow_used_ring) {
- rte_free(vq->shadow_used_ring);
- vq->shadow_used_ring = new_shadow_used_ring;
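+ /* Re-allocate the shadow used ring on the new NUMA node; the old
+  * ring is kept if the allocation fails.
+  */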
+ if (vq_is_packed(dev)) {
+ new_shadow_used_packed = rte_malloc_socket(NULL,
+ vq->size *
+ sizeof(struct vring_used_elem_packed),
+ RTE_CACHE_LINE_SIZE,
+ newnode);
+ if (new_shadow_used_packed) {
+ rte_free(vq->shadow_used_packed);
+ vq->shadow_used_packed = new_shadow_used_packed;
+ }
+ } else {
+ new_shadow_used_split = rte_malloc_socket(NULL,
+ vq->size *
+ sizeof(struct vring_used_elem),
+ RTE_CACHE_LINE_SIZE,
+ newnode);
+ if (new_shadow_used_split) {
+ rte_free(vq->shadow_used_split);
+ vq->shadow_used_split = new_shadow_used_split;
+ }
}
new_batch_copy_elems = rte_malloc_socket(NULL,
if (dev->dequeue_zero_copy)
free_zmbufs(vq);
- rte_free(vq->shadow_used_ring);
- vq->shadow_used_ring = NULL;
+ if (vq_is_packed(dev)) {
+ rte_free(vq->shadow_used_packed);
+ vq->shadow_used_packed = NULL;
+ } else {
+ rte_free(vq->shadow_used_split);
+ vq->shadow_used_split = NULL;
+ }
rte_free(vq->batch_copy_elems);
vq->batch_copy_elems = NULL;
uint16_t to, uint16_t from, uint16_t size)
{
rte_memcpy(&vq->used->ring[to],
- &vq->shadow_used_ring[from],
+ &vq->shadow_used_split[from],
size * sizeof(struct vring_used_elem));
vhost_log_cache_used_vring(dev, vq,
offsetof(struct vring_used, ring[to]),
{
uint16_t i = vq->shadow_used_idx++;
- vq->shadow_used_ring[i].id = desc_idx;
- vq->shadow_used_ring[i].len = len;
+ vq->shadow_used_split[i].id = desc_idx;
+ vq->shadow_used_split[i].len = len;
+}
+
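+/*
+ * Write the pending shadow used entries back into the packed descriptor
+ * ring and log the writes for live migration.
+ */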
+static __rte_unused __rte_always_inline void
+flush_shadow_used_ring_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq)
+{
+ int i;
+ uint16_t used_idx = vq->last_used_idx;
+
+ /* Split loop in two to save memory barriers */
+ for (i = 0; i < vq->shadow_used_idx; i++) {
+ vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
+ vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
+
+ used_idx += vq->shadow_used_packed[i].count;
+ if (used_idx >= vq->size)
+ used_idx -= vq->size;
+ }
+
+ rte_smp_wmb();
+
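+ /* Second pass: write the flags only after the barrier above, so the
+  * driver cannot see a descriptor marked used before its id/len.
+  */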
+ for (i = 0; i < vq->shadow_used_idx; i++) {
+ uint16_t flags;
+
+ if (vq->shadow_used_packed[i].len)
+ flags = VRING_DESC_F_WRITE;
+ else
+ flags = 0;
+
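+ /* A packed descriptor is marked used when its AVAIL and USED flags
+  * both match the device's used wrap counter.
+  */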
+ if (vq->used_wrap_counter) {
+ flags |= VRING_DESC_F_USED;
+ flags |= VRING_DESC_F_AVAIL;
+ } else {
+ flags &= ~VRING_DESC_F_USED;
+ flags &= ~VRING_DESC_F_AVAIL;
+ }
+
+ vq->desc_packed[vq->last_used_idx].flags = flags;
+
+ vhost_log_cache_used_vring(dev, vq,
+ vq->last_used_idx *
+ sizeof(struct vring_packed_desc),
+ sizeof(struct vring_packed_desc));
+
+ vq->last_used_idx += vq->shadow_used_packed[i].count;
+ if (vq->last_used_idx >= vq->size) {
+ vq->used_wrap_counter ^= 1;
+ vq->last_used_idx -= vq->size;
+ }
+ }
+
+ rte_smp_wmb();
+ vq->shadow_used_idx = 0;
+ vhost_log_cache_sync(dev, vq);
+}
+
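+/*
+ * Defer a used entry: 'desc_idx' is the buffer id, 'len' the length
+ * written to the buffer (0 for read-only buffers), and 'count' the number
+ * of descriptors the buffer spans. Entries are written out by
+ * flush_shadow_used_ring_packed().
+ */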
+static __rte_unused __rte_always_inline void
+update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
+ uint16_t desc_idx, uint16_t len, uint16_t count)
+{
+ uint16_t i = vq->shadow_used_idx++;
+
+ vq->shadow_used_packed[i].id = desc_idx;
+ vq->shadow_used_packed[i].len = len;
+ vq->shadow_used_packed[i].count = count;
}
static inline void