fix typos using codespell utility
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 74263a3..ebfda1c 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -39,7 +39,7 @@
 #include <rte_memcpy.h>
 #include <rte_ether.h>
 #include <rte_ip.h>
-#include <rte_virtio_net.h>
+#include <rte_vhost.h>
 #include <rte_tcp.h>
 #include <rte_udp.h>
 #include <rte_sctp.h>
 #include "vhost.h"
 
 #define MAX_PKT_BURST 32
-#define VHOST_LOG_PAGE 4096
 
-static inline void __attribute__((always_inline))
-vhost_log_page(uint8_t *log_base, uint64_t page)
+static bool
+is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
 {
-       log_base[page / 8] |= 1 << (page % 8);
+       return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
 }
 
-static inline void __attribute__((always_inline))
-vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
+static __rte_always_inline void
+do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                         uint16_t to, uint16_t from, uint16_t size)
 {
-       uint64_t page;
+       rte_memcpy(&vq->used->ring[to],
+                       &vq->shadow_used_ring[from],
+                       size * sizeof(struct vring_used_elem));
+       vhost_log_used_vring(dev, vq,
+                       offsetof(struct vring_used, ring[to]),
+                       size * sizeof(struct vring_used_elem));
+}
 
-       if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
-                  !dev->log_base || !len))
-               return;
+static __rte_always_inline void
+flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+       uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
 
-       if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
-               return;
+       if (used_idx + vq->shadow_used_idx <= vq->size) {
+               do_flush_shadow_used_ring(dev, vq, used_idx, 0,
+                                         vq->shadow_used_idx);
+       } else {
+               uint16_t size;
 
-       /* To make sure guest memory updates are committed before logging */
-       rte_smp_wmb();
+               /* update used ring interval [used_idx, vq->size] */
+               size = vq->size - used_idx;
+               do_flush_shadow_used_ring(dev, vq, used_idx, 0, size);
 
-       page = addr / VHOST_LOG_PAGE;
-       while (page * VHOST_LOG_PAGE < addr + len) {
-               vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
-               page += 1;
+               /* update the left half used ring interval [0, left_size] */
+               do_flush_shadow_used_ring(dev, vq, 0, size,
+                                         vq->shadow_used_idx - size);
        }
-}
+       vq->last_used_idx += vq->shadow_used_idx;
 
-static inline void __attribute__((always_inline))
-vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                    uint64_t offset, uint64_t len)
-{
-       vhost_log_write(dev, vq->log_guest_addr + offset, len);
+       rte_smp_wmb();
+
+       *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
+       vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
+               sizeof(vq->used->idx));
 }
 
-static bool
-is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t qp_nb)
+static __rte_always_inline void
+update_shadow_used_ring(struct vhost_virtqueue *vq,
+                        uint16_t desc_idx, uint16_t len)
 {
-       return (is_tx ^ (idx & 1)) == 0 && idx < qp_nb * VIRTIO_QNUM;
+       uint16_t i = vq->shadow_used_idx++;
+
+       vq->shadow_used_ring[i].id  = desc_idx;
+       vq->shadow_used_ring[i].len = len;
 }
 
+/* avoid unnecessary write operations, to lessen cache issues */
+#define ASSIGN_UNLESS_EQUAL(var, val) do {     \
+       if ((var) != (val))                     \
+               (var) = (val);                  \
+} while (0)
+
 static void
 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
 {
@@ -112,6 +132,10 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
                                                cksum));
                        break;
                }
+       } else {
+               ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
+               ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
+               ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
        }
 
        if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
@@ -122,32 +146,27 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
                net_hdr->gso_size = m_buf->tso_segsz;
                net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
                                        + m_buf->l4_len;
+       } else {
+               ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
+               ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
+               ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
        }
 }
 
-static inline void
-copy_virtio_net_hdr(struct virtio_net *dev, uint64_t desc_addr,
-                   struct virtio_net_hdr_mrg_rxbuf hdr)
-{
-       if (dev->vhost_hlen == sizeof(struct virtio_net_hdr_mrg_rxbuf))
-               *(struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)desc_addr = hdr;
-       else
-               *(struct virtio_net_hdr *)(uintptr_t)desc_addr = hdr.hdr;
-}
-
-static inline int __attribute__((always_inline))
-copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                 struct rte_mbuf *m, uint16_t desc_idx)
+static __rte_always_inline int
+copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
+                 struct rte_mbuf *m, uint16_t desc_idx, uint32_t size)
 {
        uint32_t desc_avail, desc_offset;
        uint32_t mbuf_avail, mbuf_offset;
        uint32_t cpy_len;
        struct vring_desc *desc;
        uint64_t desc_addr;
-       struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
+       /* A counter to avoid a dead loop in the desc chain */
+       uint16_t nr_desc = 1;
 
-       desc = &vq->desc[desc_idx];
-       desc_addr = gpa_to_vva(dev, desc->addr);
+       desc = &descs[desc_idx];
+       desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
        /*
         * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
         * performance issue with some versions of gcc (4.8.4 and 5.3.0) which
@@ -158,8 +177,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
        rte_prefetch0((void *)(uintptr_t)desc_addr);
 
-       virtio_enqueue_offload(m, &virtio_hdr.hdr);
-       copy_virtio_net_hdr(dev, desc_addr, virtio_hdr);
+       virtio_enqueue_offload(m, (struct virtio_net_hdr *)(uintptr_t)desc_addr);
        vhost_log_write(dev, desc->addr, dev->vhost_hlen);
        PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);
 
@@ -183,11 +201,11 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                /* Room in vring buffer is not enough */
                                return -1;
                        }
-                       if (unlikely(desc->next >= vq->size))
+                       if (unlikely(desc->next >= size || ++nr_desc > size))
                                return -1;
 
-                       desc = &vq->desc[desc->next];
-                       desc_addr = gpa_to_vva(dev, desc->addr);
+                       desc = &descs[desc->next];
+                       desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
                        if (unlikely(!desc_addr))
                                return -1;
 
@@ -215,22 +233,23 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 /**
  * This function adds buffers to the virtio devices RX virtqueue. Buffers can
  * be received from the physical port or from another virtio device. A packet
- * count is returned to indicate the number of packets that are succesfully
+ * count is returned to indicate the number of packets that are successfully
  * added to the RX queue. This function works when the mbuf is scattered, but
  * it doesn't support the mergeable feature.
  */
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
              struct rte_mbuf **pkts, uint32_t count)
 {
        struct vhost_virtqueue *vq;
        uint16_t avail_idx, free_entries, start_idx;
        uint16_t desc_indexes[MAX_PKT_BURST];
+       struct vring_desc *descs;
        uint16_t used_idx;
-       uint32_t i;
+       uint32_t i, sz;
 
        LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
-       if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
+       if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
                RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
                        dev->vid, __func__, queue_id);
                return 0;
@@ -269,7 +288,23 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
                uint16_t desc_idx = desc_indexes[i];
                int err;
 
-               err = copy_mbuf_to_desc(dev, vq, pkts[i], desc_idx);
+               if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
+                       descs = (struct vring_desc *)(uintptr_t)
+                               rte_vhost_gpa_to_vva(dev->mem,
+                                       vq->desc[desc_idx].addr);
+                       if (unlikely(!descs)) {
+                               count = i;
+                               break;
+                       }
+
+                       sz = vq->desc[desc_idx].len / sizeof(*descs);
+                       desc_idx = 0;
+               } else {
+                       descs = vq->desc;
+                       sz = vq->size;
+               }
+
+               err = copy_mbuf_to_desc(dev, descs, pkts[i], desc_idx, sz);
                if (unlikely(err)) {
                        used_idx = (start_idx + i) & (vq->size - 1);
                        vq->used->ring[used_idx].len = dev->vhost_hlen;
@@ -300,33 +335,46 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
        return count;
 }
 
-static inline int
-fill_vec_buf(struct vhost_virtqueue *vq, uint32_t avail_idx,
-            uint32_t *allocated, uint32_t *vec_idx,
-            struct buf_vector *buf_vec)
+static __rte_always_inline int
+fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                        uint32_t avail_idx, uint32_t *vec_idx,
+                        struct buf_vector *buf_vec, uint16_t *desc_chain_head,
+                        uint16_t *desc_chain_len)
 {
        uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
        uint32_t vec_id = *vec_idx;
-       uint32_t len    = *allocated;
+       uint32_t len    = 0;
+       struct vring_desc *descs = vq->desc;
+
+       *desc_chain_head = idx;
+
+       if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
+               descs = (struct vring_desc *)(uintptr_t)
+                       rte_vhost_gpa_to_vva(dev->mem, vq->desc[idx].addr);
+               if (unlikely(!descs))
+                       return -1;
+
+               idx = 0;
+       }
 
        while (1) {
                if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size))
                        return -1;
 
-               len += vq->desc[idx].len;
-               buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
-               buf_vec[vec_id].buf_len  = vq->desc[idx].len;
+               len += descs[idx].len;
+               buf_vec[vec_id].buf_addr = descs[idx].addr;
+               buf_vec[vec_id].buf_len  = descs[idx].len;
                buf_vec[vec_id].desc_idx = idx;
                vec_id++;
 
-               if ((vq->desc[idx].flags & VRING_DESC_F_NEXT) == 0)
+               if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
                        break;
 
-               idx = vq->desc[idx].next;
+               idx = descs[idx].next;
        }
 
-       *allocated = len;
-       *vec_idx   = vec_id;
+       *desc_chain_len = len;
+       *vec_idx = vec_id;
 
        return 0;
 }
@@ -335,31 +383,34 @@ fill_vec_buf(struct vhost_virtqueue *vq, uint32_t avail_idx,
  * Returns -1 on fail, 0 on success
  */
 static inline int
-reserve_avail_buf_mergeable(struct vhost_virtqueue *vq, uint32_t size,
-                           uint16_t *end, struct buf_vector *buf_vec)
+reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                               uint32_t size, struct buf_vector *buf_vec,
+                               uint16_t *num_buffers, uint16_t avail_head)
 {
        uint16_t cur_idx;
-       uint16_t avail_idx;
-       uint32_t allocated = 0;
        uint32_t vec_idx = 0;
        uint16_t tries = 0;
 
-       cur_idx  = vq->last_used_idx;
+       uint16_t head_idx = 0;
+       uint16_t len = 0;
 
-       while (1) {
-               avail_idx = *((volatile uint16_t *)&vq->avail->idx);
-               if (unlikely(cur_idx == avail_idx))
+       *num_buffers = 0;
+       cur_idx  = vq->last_avail_idx;
+
+       while (size > 0) {
+               if (unlikely(cur_idx == avail_head))
                        return -1;
 
-               if (unlikely(fill_vec_buf(vq, cur_idx, &allocated,
-                                         &vec_idx, buf_vec) < 0))
+               if (unlikely(fill_vec_buf(dev, vq, cur_idx, &vec_idx, buf_vec,
+                                               &head_idx, &len) < 0))
                        return -1;
+               len = RTE_MIN(len, size);
+               update_shadow_used_ring(vq, head_idx, len);
+               size -= len;
 
                cur_idx++;
                tries++;
-
-               if (allocated >= size)
-                       break;
+               *num_buffers += 1;
 
                /*
                 * if we tried all available ring items, and still
@@ -370,45 +421,35 @@ reserve_avail_buf_mergeable(struct vhost_virtqueue *vq, uint32_t size,
                        return -1;
        }
 
-       *end = cur_idx;
        return 0;
 }
 
-static inline uint32_t __attribute__((always_inline))
-copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                           uint16_t end_idx, struct rte_mbuf *m,
-                           struct buf_vector *buf_vec)
+static __rte_always_inline int
+copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
+                           struct buf_vector *buf_vec, uint16_t num_buffers)
 {
-       struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
        uint32_t vec_idx = 0;
-       uint16_t start_idx = vq->last_used_idx;
-       uint16_t cur_idx = start_idx;
        uint64_t desc_addr;
        uint32_t mbuf_offset, mbuf_avail;
        uint32_t desc_offset, desc_avail;
        uint32_t cpy_len;
-       uint16_t desc_idx, used_idx;
+       uint64_t hdr_addr, hdr_phys_addr;
+       struct rte_mbuf *hdr_mbuf;
 
        if (unlikely(m == NULL))
-               return 0;
-
-       LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
-               dev->vid, cur_idx, end_idx);
+               return -1;
 
-       desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
+       desc_addr = rte_vhost_gpa_to_vva(dev->mem, buf_vec[vec_idx].buf_addr);
        if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr)
-               return 0;
+               return -1;
 
-       rte_prefetch0((void *)(uintptr_t)desc_addr);
+       hdr_mbuf = m;
+       hdr_addr = desc_addr;
+       hdr_phys_addr = buf_vec[vec_idx].buf_addr;
+       rte_prefetch0((void *)(uintptr_t)hdr_addr);
 
-       virtio_hdr.num_buffers = end_idx - start_idx;
        LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
-               dev->vid, virtio_hdr.num_buffers);
-
-       virtio_enqueue_offload(m, &virtio_hdr.hdr);
-       copy_virtio_net_hdr(dev, desc_addr, virtio_hdr);
-       vhost_log_write(dev, buf_vec[vec_idx].buf_addr, dev->vhost_hlen);
-       PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);
+               dev->vid, num_buffers);
 
        desc_avail  = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
        desc_offset = dev->vhost_hlen;
@@ -418,23 +459,11 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
        while (mbuf_avail != 0 || m->next != NULL) {
                /* done with current desc buf, get the next one */
                if (desc_avail == 0) {
-                       desc_idx = buf_vec[vec_idx].desc_idx;
-
-                       if (!(vq->desc[desc_idx].flags & VRING_DESC_F_NEXT)) {
-                               /* Update used ring with desc information */
-                               used_idx = cur_idx++ & (vq->size - 1);
-                               vq->used->ring[used_idx].id  = desc_idx;
-                               vq->used->ring[used_idx].len = desc_offset;
-                               vhost_log_used_vring(dev, vq,
-                                       offsetof(struct vring_used,
-                                                ring[used_idx]),
-                                       sizeof(vq->used->ring[used_idx]));
-                       }
-
                        vec_idx++;
-                       desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
+                       desc_addr = rte_vhost_gpa_to_vva(dev->mem,
+                                       buf_vec[vec_idx].buf_addr);
                        if (unlikely(!desc_addr))
-                               return 0;
+                               return -1;
 
                        /* Prefetch buffer address. */
                        rte_prefetch0((void *)(uintptr_t)desc_addr);
@@ -450,6 +479,21 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        mbuf_avail  = rte_pktmbuf_data_len(m);
                }
 
+               if (hdr_addr) {
+                       struct virtio_net_hdr_mrg_rxbuf *hdr;
+
+                       hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)
+                               hdr_addr;
+                       virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
+                       ASSIGN_UNLESS_EQUAL(hdr->num_buffers, num_buffers);
+
+                       vhost_log_write(dev, hdr_phys_addr, dev->vhost_hlen);
+                       PRINT_PACKET(dev, (uintptr_t)hdr_addr,
+                                    dev->vhost_hlen, 0);
+
+                       hdr_addr = 0;
+               }
+
                cpy_len = RTE_MIN(desc_avail, mbuf_avail);
                rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
                        rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
@@ -465,27 +509,21 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
                desc_offset += cpy_len;
        }
 
-       used_idx = cur_idx & (vq->size - 1);
-       vq->used->ring[used_idx].id = buf_vec[vec_idx].desc_idx;
-       vq->used->ring[used_idx].len = desc_offset;
-       vhost_log_used_vring(dev, vq,
-               offsetof(struct vring_used, ring[used_idx]),
-               sizeof(vq->used->ring[used_idx]));
-
-       return end_idx - start_idx;
+       return 0;
 }
 
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
 virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
        struct rte_mbuf **pkts, uint32_t count)
 {
        struct vhost_virtqueue *vq;
-       uint32_t pkt_idx = 0, nr_used = 0;
-       uint16_t end;
+       uint32_t pkt_idx = 0;
+       uint16_t num_buffers;
        struct buf_vector buf_vec[BUF_VECTOR_MAX];
+       uint16_t avail_head;
 
        LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
-       if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
+       if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
                RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
                        dev->vid, __func__, queue_id);
                return 0;
@@ -499,28 +537,39 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
        if (count == 0)
                return 0;
 
+       rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
+
+       vq->shadow_used_idx = 0;
+       avail_head = *((volatile uint16_t *)&vq->avail->idx);
        for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
                uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
 
-               if (unlikely(reserve_avail_buf_mergeable(vq, pkt_len,
-                                                        &end, buf_vec) < 0)) {
+               if (unlikely(reserve_avail_buf_mergeable(dev, vq,
+                                               pkt_len, buf_vec, &num_buffers,
+                                               avail_head) < 0)) {
                        LOG_DEBUG(VHOST_DATA,
                                "(%d) failed to get enough desc from vring\n",
                                dev->vid);
+                       vq->shadow_used_idx -= num_buffers;
                        break;
                }
 
-               nr_used = copy_mbuf_to_desc_mergeable(dev, vq, end,
-                                                     pkts[pkt_idx], buf_vec);
-               rte_smp_wmb();
+               LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
+                       dev->vid, vq->last_avail_idx,
+                       vq->last_avail_idx + num_buffers);
 
-               *(volatile uint16_t *)&vq->used->idx += nr_used;
-               vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
-                       sizeof(vq->used->idx));
-               vq->last_used_idx += nr_used;
+               if (copy_mbuf_to_desc_mergeable(dev, pkts[pkt_idx],
+                                               buf_vec, num_buffers) < 0) {
+                       vq->shadow_used_idx -= num_buffers;
+                       break;
+               }
+
+               vq->last_avail_idx += num_buffers;
        }
 
-       if (likely(pkt_idx)) {
+       if (likely(vq->shadow_used_idx)) {
+               flush_shadow_used_ring(dev, vq);
+
                /* flush used->idx update before we read avail->flags. */
                rte_mb();
 
@@ -548,6 +597,18 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
                return virtio_dev_rx(dev, queue_id, pkts, count);
 }
 
+static inline bool
+virtio_net_with_host_offload(struct virtio_net *dev)
+{
+       if (dev->features &
+                       ((1ULL << VIRTIO_NET_F_CSUM) |
+                        (1ULL << VIRTIO_NET_F_HOST_ECN) |
+                        (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+                        (1ULL << VIRTIO_NET_F_HOST_TSO6) |
+                        (1ULL << VIRTIO_NET_F_HOST_UFO)))
+               return true;
+
+       return false;
+}
+
 static void
 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
 {
@@ -573,14 +634,14 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
 
        switch (ethertype) {
        case ETHER_TYPE_IPv4:
-               ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+               ipv4_hdr = l3_hdr;
                *l4_proto = ipv4_hdr->next_proto_id;
                m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
                *l4_hdr = (char *)l3_hdr + m->l3_len;
                m->ol_flags |= PKT_TX_IPV4;
                break;
        case ETHER_TYPE_IPv6:
-               ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+               ipv6_hdr = l3_hdr;
                *l4_proto = ipv6_hdr->proto;
                m->l3_len = sizeof(struct ipv6_hdr);
                *l4_hdr = (char *)l3_hdr + m->l3_len;
@@ -589,17 +650,21 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
        default:
                m->l3_len = 0;
                *l4_proto = 0;
+               *l4_hdr = NULL;
                break;
        }
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 {
        uint16_t l4_proto = 0;
        void *l4_hdr = NULL;
        struct tcp_hdr *tcp_hdr = NULL;
 
+       if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
+               return;
+
        parse_ethernet(m, &l4_proto, &l4_hdr);
        if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                if (hdr->csum_start == (m->l2_len + m->l3_len)) {
@@ -622,11 +687,11 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
                }
        }
 
-       if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+       if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                case VIRTIO_NET_HDR_GSO_TCPV6:
-                       tcp_hdr = (struct tcp_hdr *)l4_hdr;
+                       tcp_hdr = l4_hdr;
                        m->ol_flags |= PKT_TX_TCP_SEG;
                        m->tso_segsz = hdr->gso_size;
                        m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
@@ -678,13 +743,13 @@ make_rarp_packet(struct rte_mbuf *rarp_mbuf, const struct ether_addr *mac)
        return 0;
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 put_zmbuf(struct zcopy_mbuf *zmbuf)
 {
        zmbuf->in_use = 0;
 }
 
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
 copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
                  uint16_t max_desc, struct rte_mbuf *m, uint16_t desc_idx,
                  struct rte_mempool *mbuf_pool)
@@ -695,7 +760,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
        uint32_t mbuf_avail, mbuf_offset;
        uint32_t cpy_len;
        struct rte_mbuf *cur = m, *prev = m;
-       struct virtio_net_hdr *hdr;
+       struct virtio_net_hdr *hdr = NULL;
        /* A counter to avoid desc dead loop chain */
        uint32_t nr_desc = 1;
 
@@ -704,12 +769,14 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
                        (desc->flags & VRING_DESC_F_INDIRECT))
                return -1;
 
-       desc_addr = gpa_to_vva(dev, desc->addr);
+       desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
        if (unlikely(!desc_addr))
                return -1;
 
-       hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
-       rte_prefetch0(hdr);
+       if (virtio_net_with_host_offload(dev)) {
+               hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
+               rte_prefetch0(hdr);
+       }
 
        /*
         * A virtio driver normally uses at least 2 desc buffers
@@ -722,22 +789,22 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
                if (unlikely(desc->flags & VRING_DESC_F_INDIRECT))
                        return -1;
 
-               desc_addr = gpa_to_vva(dev, desc->addr);
+               desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
                if (unlikely(!desc_addr))
                        return -1;
 
-               rte_prefetch0((void *)(uintptr_t)desc_addr);
-
                desc_offset = 0;
                desc_avail  = desc->len;
                nr_desc    += 1;
-
-               PRINT_PACKET(dev, (uintptr_t)desc_addr, desc->len, 0);
        } else {
                desc_avail  = desc->len - dev->vhost_hlen;
                desc_offset = dev->vhost_hlen;
        }
 
+       rte_prefetch0((void *)(uintptr_t)(desc_addr + desc_offset));
+
+       PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), desc_avail, 0);
+
        mbuf_offset = 0;
        mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
        while (1) {
@@ -786,7 +853,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
                        if (unlikely(desc->flags & VRING_DESC_F_INDIRECT))
                                return -1;
 
-                       desc_addr = gpa_to_vva(dev, desc->addr);
+                       desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
                        if (unlikely(!desc_addr))
                                return -1;
 
@@ -809,6 +876,8 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
                                        "allocate memory for mbuf.\n");
                                return -1;
                        }
+                       if (unlikely(dev->dequeue_zero_copy))
+                               rte_mbuf_refcnt_update(cur, 1);
 
                        prev->next = cur;
                        prev->data_len = mbuf_offset;
@@ -824,13 +893,13 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
        prev->data_len = mbuf_offset;
        m->pkt_len    += mbuf_offset;
 
-       if (hdr->flags != 0 || hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE)
+       if (hdr)
                vhost_dequeue_offload(hdr, m);
 
        return 0;
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
                 uint32_t used_idx, uint32_t desc_idx)
 {
@@ -841,7 +910,7 @@ update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        sizeof(vq->used->ring[used_idx]));
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
                uint32_t count)
 {
@@ -861,7 +930,7 @@ update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
                eventfd_write(vq->callfd, (eventfd_t)1);
 }
 
-static inline struct zcopy_mbuf *__attribute__((always_inline))
+static __rte_always_inline struct zcopy_mbuf *
 get_zmbuf(struct vhost_virtqueue *vq)
 {
        uint16_t i;
@@ -892,7 +961,7 @@ again:
        return NULL;
 }
 
-static inline bool __attribute__((always_inline))
+static __rte_always_inline bool
 mbuf_is_consumed(struct rte_mbuf *m)
 {
        while (m) {
@@ -921,7 +990,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        if (!dev)
                return 0;
 
-       if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) {
+       if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
                RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
                        dev->vid, __func__, queue_id);
                return 0;
@@ -960,9 +1029,21 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
         * array, to make it look like the guest actually sent such a packet.
         *
         * Check user_send_rarp() for more information.
+        *
+        * broadcast_rarp shares a cacheline in the virtio_net structure
+        * with some fields that are accessed during enqueue and
+        * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
+        * result in false sharing between enqueue and dequeue.
+        *
+        * Prevent unnecessary false sharing by reading broadcast_rarp first
+        * and only performing cmpset if the read indicates it is likely to
+        * be set.
         */
-       if (unlikely(rte_atomic16_cmpset((volatile uint16_t *)
-                                        &dev->broadcast_rarp.cnt, 1, 0))) {
+
+       if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
+                       rte_atomic16_cmpset((volatile uint16_t *)
+                               &dev->broadcast_rarp.cnt, 1, 0))) {
+
                rarp_mbuf = rte_pktmbuf_alloc(mbuf_pool);
                if (rarp_mbuf == NULL) {
                        RTE_LOG(ERR, VHOST_DATA,
@@ -1017,7 +1098,8 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
                        rte_prefetch0(&vq->desc[desc_indexes[i + 1]]);
 
                if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) {
-                       desc = (struct vring_desc *)(uintptr_t)gpa_to_vva(dev,
+                       desc = (struct vring_desc *)(uintptr_t)
+                               rte_vhost_gpa_to_vva(dev->mem,
                                        vq->desc[desc_indexes[i]].addr);
                        if (unlikely(!desc))
                                break;