From 58902736a4c956b42d4e1e44186cba340d0b84fe Mon Sep 17 00:00:00 2001
From: Joyce Kong
Date: Fri, 10 Jul 2020 10:38:50 +0800
Subject: [PATCH] vhost: restrict pointer aliasing for packed ring
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Restrict pointer aliasing to allow the compiler to vectorize loops
more aggressively.

With this patch, a 9.6% throughput improvement is observed in the
packed virtio-net PVP case, and a 2.8% throughput improvement in the
packed virtio-user PVP case. All performance data are measured on a
ThunderX-2 platform under 0.001% acceptable packet loss, with one core
on both the vhost and virtio sides.

Signed-off-by: Joyce Kong
Reviewed-by: Phil Yang
Acked-by: Adrián Moreno
---
 drivers/net/virtio/virtio_rxtx_simple_neon.c |  5 +++--
 lib/librte_vhost/virtio_net.c                | 14 +++++++-------
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c
index a9b649814e..02520fda83 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
@@ -36,8 +36,9 @@
  * - nb_pkts < RTE_VIRTIO_DESC_PER_LOOP, just return no packet
  */
 uint16_t
-virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf
-		**__rte_restrict rx_pkts, uint16_t nb_pkts)
+virtio_recv_pkts_vec(void *rx_queue,
+		struct rte_mbuf **__rte_restrict rx_pkts,
+		uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
 	struct virtqueue *vq = rxvq->vq;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 236498f71e..1d0be3dd44 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1353,8 +1353,8 @@ virtio_dev_rx_single_packed(struct virtio_net *dev,
 
 static __rte_noinline uint32_t
 virtio_dev_rx_packed(struct virtio_net *dev,
-		     struct vhost_virtqueue *vq,
-		     struct rte_mbuf **pkts,
+		     struct vhost_virtqueue *__rte_restrict vq,
+		     struct rte_mbuf **__rte_restrict pkts,
 		     uint32_t count)
 {
 	uint32_t pkt_idx = 0;
@@ -1439,7 +1439,7 @@ out_access_unlock:
 
 uint16_t
 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
-	struct rte_mbuf **pkts, uint16_t count)
+	struct rte_mbuf **__rte_restrict pkts, uint16_t count)
 {
 	struct virtio_net *dev = get_device(vid);
 
@@ -2671,9 +2671,9 @@ free_zmbuf(struct vhost_virtqueue *vq)
 
 static __rte_noinline uint16_t
 virtio_dev_tx_packed_zmbuf(struct virtio_net *dev,
-	struct vhost_virtqueue *vq,
+	struct vhost_virtqueue *__rte_restrict vq,
 	struct rte_mempool *mbuf_pool,
-	struct rte_mbuf **pkts,
+	struct rte_mbuf **__rte_restrict pkts,
 	uint32_t count)
 {
 	uint32_t pkt_idx = 0;
@@ -2707,9 +2707,9 @@ virtio_dev_tx_packed_zmbuf(struct virtio_net *dev,
 
 static __rte_noinline uint16_t
 virtio_dev_tx_packed(struct virtio_net *dev,
-	struct vhost_virtqueue *vq,
+	struct vhost_virtqueue *__rte_restrict vq,
 	struct rte_mempool *mbuf_pool,
-	struct rte_mbuf **pkts,
+	struct rte_mbuf **__rte_restrict pkts,
 	uint32_t count)
 {
 	uint32_t pkt_idx = 0;
-- 
2.20.1
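
Note for reviewers (not part of the patch): the gain comes from __rte_restrict
telling the compiler that the annotated pointers do not alias, so it can
vectorize the packed-ring copy loops without emitting runtime overlap checks.
Below is a minimal, self-contained sketch of that effect using the plain C99
restrict qualifier, which the DPDK __rte_restrict macro is understood to wrap;
the function and variable names are illustrative only and do not appear in the
patch.

#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative sketch only. Because dst and src are declared restrict,
 * the compiler may assume the two buffers never overlap, so it can load
 * and store several elements per iteration (e.g. with NEON) instead of
 * guarding the loop with runtime overlap checks or keeping it scalar.
 */
static void
scatter_copy(uint16_t *restrict dst, const uint16_t *restrict src, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		dst[i] = src[i];
}

In the patch itself the same hint is applied to the vq and pkts parameters of
the packed-ring enqueue and dequeue paths.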