vhost: append shadow used ring function names with split
Author:     Maxime Coquelin <maxime.coquelin@redhat.com>
AuthorDate: Fri, 6 Jul 2018 07:07:15 +0000 (09:07 +0200)
Commit:     Thomas Monjalon <thomas@monjalon.net>
CommitDate: Tue, 10 Jul 2018 21:19:29 +0000 (23:19 +0200)
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Acked-by: Tiwei Bie <tiwei.bie@intel.com>
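
This is a mechanical rename: the shadow used ring helpers below operate only on split virtqueues, so they gain a _split suffix, apparently to keep the generic names free for the packed ring variants this preparatory work leads up to. No functional change.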
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 22c9f15..49be201 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -77,8 +77,9 @@ free_ind_table(void *idesc)
 }
 
 static __rte_always_inline void
-do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                         uint16_t to, uint16_t from, uint16_t size)
+do_flush_shadow_used_ring_split(struct virtio_net *dev,
+                       struct vhost_virtqueue *vq,
+                       uint16_t to, uint16_t from, uint16_t size)
 {
        rte_memcpy(&vq->used->ring[to],
                        &vq->shadow_used_ring[from],
@@ -89,22 +90,22 @@ do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
 }
 
 static __rte_always_inline void
-flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
+flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
        uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
 
        if (used_idx + vq->shadow_used_idx <= vq->size) {
-               do_flush_shadow_used_ring(dev, vq, used_idx, 0,
+               do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
                                          vq->shadow_used_idx);
        } else {
                uint16_t size;
 
                /* update used ring interval [used_idx, vq->size] */
                size = vq->size - used_idx;
-               do_flush_shadow_used_ring(dev, vq, used_idx, 0, size);
+               do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
 
                /* update the left half used ring interval [0, left_size] */
-               do_flush_shadow_used_ring(dev, vq, 0, size,
+               do_flush_shadow_used_ring_split(dev, vq, 0, size,
                                          vq->shadow_used_idx - size);
        }
        vq->last_used_idx += vq->shadow_used_idx;
@@ -120,7 +121,7 @@ flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
 }
 
 static __rte_always_inline void
-update_shadow_used_ring(struct vhost_virtqueue *vq,
+update_shadow_used_ring_split(struct vhost_virtqueue *vq,
                         uint16_t desc_idx, uint16_t len)
 {
        uint16_t i = vq->shadow_used_idx++;
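
Taken together, the hunks above cover the whole mechanism being renamed: update_shadow_used_ring_split() stages (id, len) pairs in a private array, and flush_shadow_used_ring_split() publishes a burst to the guest-visible used ring in at most two contiguous copies, splitting when the burst wraps past the ring end. Below is a minimal standalone sketch of that technique; struct model_vq and the function names are hypothetical stand-ins, not the DPDK definitions, and the real code additionally uses rte_memcpy(), logs the dirty used-ring area for live migration, and orders the ring writes before the index update with a write barrier.

/* Minimal sketch of the shadow used ring technique; hypothetical
 * names, not the DPDK types from vhost.h. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define RING_SIZE 8 /* like vq->size, assumed to be a power of two */

struct used_elem {
	uint32_t id;
	uint32_t len;
};

struct model_vq {
	struct used_elem used_ring[RING_SIZE]; /* guest-visible used ring */
	uint16_t used_idx;                     /* guest-visible index */
	struct used_elem shadow[RING_SIZE];    /* private staging array */
	uint16_t shadow_used_idx;              /* entries staged so far */
	uint16_t last_used_idx;                /* free-running used index */
};

/* Counterpart of update_shadow_used_ring_split(): stage one entry. */
static void
update_split(struct model_vq *vq, uint16_t desc_idx, uint32_t len)
{
	uint16_t i = vq->shadow_used_idx++;

	vq->shadow[i].id = desc_idx;
	vq->shadow[i].len = len;
}

/* Counterpart of do_flush_shadow_used_ring_split(): one contiguous copy. */
static void
do_flush_split(struct model_vq *vq, uint16_t to, uint16_t from, uint16_t size)
{
	memcpy(&vq->used_ring[to], &vq->shadow[from],
	       size * sizeof(struct used_elem));
}

/* Counterpart of flush_shadow_used_ring_split(): publish the burst,
 * splitting the copy in two when it wraps past the end of the ring. */
static void
flush_split(struct model_vq *vq)
{
	uint16_t used_idx = vq->last_used_idx & (RING_SIZE - 1);

	if (used_idx + vq->shadow_used_idx <= RING_SIZE) {
		do_flush_split(vq, used_idx, 0, vq->shadow_used_idx);
	} else {
		/* tail interval [used_idx, RING_SIZE) */
		uint16_t size = RING_SIZE - used_idx;

		do_flush_split(vq, used_idx, 0, size);
		/* wrapped interval [0, shadow_used_idx - size) */
		do_flush_split(vq, 0, size, vq->shadow_used_idx - size);
	}
	vq->last_used_idx += vq->shadow_used_idx;
	/* the real code issues a write barrier before this store */
	vq->used_idx = vq->last_used_idx;
	vq->shadow_used_idx = 0;
}

int
main(void)
{
	struct model_vq vq = { .last_used_idx = 6 }; /* start near ring end */
	uint16_t d;

	for (d = 0; d < 5; d++)        /* stage a 5-entry burst ... */
		update_split(&vq, d, 64);
	flush_split(&vq);              /* ... published as two copies */

	printf("used_idx=%u, slots 6,7 then 0..2 filled\n",
	       (unsigned)vq.used_idx);
	return 0;
}

Compiled and run, the example publishes a five-entry burst starting at slot 6 of an eight-slot ring, exercising the wrap path.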
@@ -353,7 +354,7 @@ reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                                VHOST_ACCESS_RW) < 0))
                        return -1;
                len = RTE_MIN(len, size);
-               update_shadow_used_ring(vq, head_idx, len);
+               update_shadow_used_ring_split(vq, head_idx, len);
                size -= len;
 
                cur_idx++;
@@ -579,7 +580,7 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
        do_data_copy_enqueue(dev, vq);
 
        if (likely(vq->shadow_used_idx)) {
-               flush_shadow_used_ring(dev, vq);
+               flush_shadow_used_ring_split(dev, vq);
                vhost_vring_call(dev, vq);
        }
 
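Here in virtio_dev_rx_split() (and likewise in the dequeue paths below), the burst-tail pattern shows why the shadow ring exists: entries are staged per descriptor chain during the burst, but the shared used ring is written and the guest notified through vhost_vring_call() at most once per burst, rather than once per packet.
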
@@ -1048,7 +1049,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        next = TAILQ_NEXT(zmbuf, next);
 
                        if (mbuf_is_consumed(zmbuf->mbuf)) {
-                               update_shadow_used_ring(vq, zmbuf->desc_idx, 0);
+                               update_shadow_used_ring_split(vq,
+                                               zmbuf->desc_idx, 0);
                                nr_updated += 1;
 
                                TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
@@ -1059,7 +1061,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        }
                }
 
-               flush_shadow_used_ring(dev, vq);
+               flush_shadow_used_ring_split(dev, vq);
                vhost_vring_call(dev, vq);
        }
 
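In this dequeue zero-copy path, a descriptor may still be referenced by hardware until its transmitted mbuf is freed, so update_shadow_used_ring_split() is deferred until mbuf_is_consumed() returns true; the zero length reflects that dequeue writes nothing back into the guest buffer.
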
@@ -1091,7 +1093,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        break;
 
                if (likely(dev->dequeue_zero_copy == 0))
-                       update_shadow_used_ring(vq, head_idx, 0);
+                       update_shadow_used_ring_split(vq, head_idx, 0);
 
                rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
 
@@ -1138,7 +1140,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                do_data_copy_dequeue(vq);
                if (unlikely(i < count))
                        vq->shadow_used_idx = i;
-               flush_shadow_used_ring(dev, vq);
+               flush_shadow_used_ring_split(dev, vq);
                vhost_vring_call(dev, vq);
        }