net/virtio: fix avail descriptor ID
[dpdk.git] / drivers / net / virtio / virtqueue.h
index 455aaaf..4f3dd31 100644 (file)
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
  */
 
 #ifndef _VIRTQUEUE_H_
 
 #include <rte_atomic.h>
 #include <rte_memory.h>
-#include <rte_memzone.h>
 #include <rte_mempool.h>
+#include <rte_net.h>
 
-#include "virtio_pci.h"
+#include "virtio.h"
 #include "virtio_ring.h"
 #include "virtio_logs.h"
+#include "virtio_rxtx.h"
 
 struct rte_mbuf;
 
+#define DEFAULT_TX_FREE_THRESH 32
+#define DEFAULT_RX_FREE_THRESH 32
+
+#define VIRTIO_MBUF_BURST_SZ 64
 /*
- * Per virtio_config.h in Linux.
+ * Per virtio_ring.h in Linux.
  *     For virtio_pci on SMP, we don't need to order with respect to MMIO
- *     accesses through relaxed memory I/O windows, so smp_mb() et al are
+ *     accesses through relaxed memory I/O windows, so thread_fence is
  *     sufficient.
  *
+ *     When using virtio to talk to real devices (e.g. vDPA) we do need
+ *     real barriers.
+ */
+static inline void
+virtio_mb(uint8_t weak_barriers)
+{
+       if (weak_barriers)
+               rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+       else
+               rte_mb();
+}
+
+static inline void
+virtio_rmb(uint8_t weak_barriers)
+{
+       if (weak_barriers)
+               rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+       else
+               rte_io_rmb();
+}
+
+static inline void
+virtio_wmb(uint8_t weak_barriers)
+{
+       if (weak_barriers)
+               rte_atomic_thread_fence(__ATOMIC_RELEASE);
+       else
+               rte_io_wmb();
+}
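A minimal sketch of where weak_barriers would come from: it is set per device, and the assumption here is that only devices negotiating VIRTIO_F_ORDER_PLATFORM (e.g. vDPA hardware) take the strong-barrier path above. The function name is illustrative, not part of this header.

static inline void
example_set_barrier_mode(struct virtio_hw *hw)
{
	/* Weak (thread-fence) barriers are enough unless the device asked
	 * for platform-level ordering via VIRTIO_F_ORDER_PLATFORM.
	 */
	hw->weak_barriers = !virtio_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
}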
+
+static inline uint16_t
+virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,
+                             uint8_t weak_barriers)
+{
+       uint16_t flags;
+
+       if (weak_barriers) {
+/* x86 prefers using rte_io_rmb over __atomic_load_n as it reports
+ * slightly better performance (~1.5%), which comes from the branch
+ * saved by the compiler.
+ * The if and else branches are identical on all platforms except Arm.
+ */
+#ifdef RTE_ARCH_ARM
+               flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
+#else
+               flags = dp->flags;
+               rte_io_rmb();
+#endif
+       } else {
+               flags = dp->flags;
+               rte_io_rmb();
+       }
+
+       return flags;
+}
+
+static inline void
+virtqueue_store_flags_packed(struct vring_packed_desc *dp,
+                             uint16_t flags, uint8_t weak_barriers)
+{
+       if (weak_barriers) {
+/* x86 prefers using rte_io_wmb over __atomic_store_n as it reports
+ * slightly better performance (~1.5%), which comes from the branch
+ * saved by the compiler.
+ * The if and else branches are identical on all platforms except Arm.
  */
-#define virtio_mb()    rte_smp_mb()
-#define virtio_rmb()   rte_smp_rmb()
-#define virtio_wmb()   rte_smp_wmb()
+#ifdef RTE_ARCH_ARM
+               __atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
+#else
+               rte_io_wmb();
+               dp->flags = flags;
+#endif
+       } else {
+               rte_io_wmb();
+               dp->flags = flags;
+       }
+}
 
 #ifdef RTE_PMD_PACKET_PREFETCH
 #define rte_packet_prefetch(p)  rte_prefetch1(p)
@@ -66,13 +113,24 @@ struct rte_mbuf;
 
 #define VIRTQUEUE_MAX_NAME_SZ 32
 
-#ifdef RTE_VIRTIO_USER
-#define MBUF_DATA_DMA_ADDR(mb, offset) \
-       ((uint64_t)((uintptr_t)(*(void **)((uintptr_t)mb + offset)) \
-                       + (mb)->data_off))
-#else /* RTE_VIRTIO_USER */
-#define MBUF_DATA_DMA_ADDR(mb, offset) rte_mbuf_data_dma_addr(mb)
-#endif /* RTE_VIRTIO_USER */
+/**
+ * Return the IOVA (or virtual address in case of virtio-user) of mbuf
+ * data buffer.
+ *
+ * The address is first read as a word-sized value (sizeof(uintptr_t))
+ * before being cast to uint64_t. This makes it work with any combination
+ * of word size (64-bit and 32-bit) and virtio device type
+ * (virtio-pci and virtio-user).
+ */
+#define VIRTIO_MBUF_ADDR(mb, vq) \
+       ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->mbuf_addr_offset)))
+
+/**
+ * Return the physical address (or virtual address in case of
+ * virtio-user) of the mbuf data buffer, taking care of the mbuf data offset.
+ */
+#define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
+       (VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)
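A minimal sketch of how mbuf_addr_offset would be chosen so that the macros above resolve to the IOVA for virtio-pci and to the virtual address for virtio-user; the helper name and the is_virtio_user flag are illustrative only.

static inline void
example_set_mbuf_addr_offset(struct virtqueue *vq, bool is_virtio_user)
{
	if (is_virtio_user)
		/* virtio-user shares the process address space with the
		 * backend, so the mbuf virtual address is usable directly.
		 */
		vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_addr);
	else
		/* A real (or emulated PCI) device needs the IOVA. */
		vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);
}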
 
 #define VTNET_SQ_RQ_QUEUE_IDX 0
 #define VTNET_SQ_TQ_QUEUE_IDX 1
@@ -123,12 +181,12 @@ enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
  */
 struct virtio_net_ctrl_mac {
        uint32_t entries;
-       uint8_t macs[][ETHER_ADDR_LEN];
-} __attribute__((__packed__));
+       uint8_t macs[][RTE_ETHER_ADDR_LEN];
+} __rte_packed;
 
 #define VIRTIO_NET_CTRL_MAC    1
- #define VIRTIO_NET_CTRL_MAC_TABLE_SET        0
- #define VIRTIO_NET_CTRL_MAC_ADDR_SET         1
+#define VIRTIO_NET_CTRL_MAC_TABLE_SET        0
+#define VIRTIO_NET_CTRL_MAC_ADDR_SET         1
 
 /**
  * Control VLAN filtering
@@ -143,10 +201,21 @@ struct virtio_net_ctrl_mac {
 #define VIRTIO_NET_CTRL_VLAN_ADD 0
 #define VIRTIO_NET_CTRL_VLAN_DEL 1
 
+/*
+ * Control link announce acknowledgement
+ *
+ * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that
+ * the driver has received the notification; the device will clear the
+ * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
+ * this command.
+ */
+#define VIRTIO_NET_CTRL_ANNOUNCE     3
+#define VIRTIO_NET_CTRL_ANNOUNCE_ACK 0
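A minimal sketch of acknowledging a link announcement over the control queue, assuming the virtio_pmd_ctrl layout declared further down and a send helper with the shape of virtio_send_command(); both the helper signature and the function name here are assumptions for illustration only.

static inline void
example_ack_link_announce(struct virtio_hw *hw)
{
	struct virtio_pmd_ctrl ctrl;

	ctrl.hdr.class = VIRTIO_NET_CTRL_ANNOUNCE;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_ANNOUNCE_ACK;

	/* No payload: the acknowledgement is the command header itself. */
	virtio_send_command(hw->cvq, &ctrl, NULL, 0);
}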
+
 struct virtio_net_ctrl_hdr {
        uint8_t class;
        uint8_t cmd;
-} __attribute__((packed));
+} __rte_packed;
 
 typedef uint8_t virtio_net_ctrl_ack;
 
@@ -164,27 +233,36 @@ struct virtio_pmd_ctrl {
 struct vq_desc_extra {
        void *cookie;
        uint16_t ndescs;
+       uint16_t next;
 };
 
+#define virtnet_rxq_to_vq(rxvq) container_of(rxvq, struct virtqueue, rxq)
+#define virtnet_txq_to_vq(txvq) container_of(txvq, struct virtqueue, txq)
+#define virtnet_cq_to_vq(cvq) container_of(cvq, struct virtqueue, cq)
+
 struct virtqueue {
        struct virtio_hw  *hw; /**< virtio_hw structure pointer. */
-       struct vring vq_ring;  /**< vring keeping desc, used and avail */
-       /**
-        * Last consumed descriptor in the used table,
-        * trails vq_ring.used->idx.
-        */
-       uint16_t vq_used_cons_idx;
+       union {
+               struct {
+                       /**< vring keeping desc, used and avail */
+                       struct vring ring;
+               } vq_split;
+
+               struct {
+                       /**< vring keeping descs and events */
+                       struct vring_packed ring;
+                       bool used_wrap_counter;
+                       uint16_t cached_flags; /**< cached flags for descs */
+                       uint16_t event_flags_shadow;
+               } vq_packed;
+       };
+
+       uint16_t vq_used_cons_idx; /**< last consumed descriptor */
        uint16_t vq_nentries;  /**< vring desc numbers */
        uint16_t vq_free_cnt;  /**< num of desc available */
        uint16_t vq_avail_idx; /**< sync until needed */
        uint16_t vq_free_thresh; /**< free threshold */
 
-       void *vq_ring_virt_mem;  /**< linear address of vring*/
-       unsigned int vq_ring_size;
-
-       phys_addr_t vq_ring_mem; /**< physical address of vring */
-                               /**< use virtual address for virtio-user. */
-
        /**
         * Head of the free chain in the descriptor table. If
         * there are no free descriptors, this will be set to
@@ -193,9 +271,21 @@ struct virtqueue {
        uint16_t  vq_desc_head_idx;
        uint16_t  vq_desc_tail_idx;
        uint16_t  vq_queue_index;   /**< PCI queue index */
-       uint16_t offset; /**< relative offset to obtain addr in mbuf */
+
+       void *vq_ring_virt_mem;  /**< linear address of vring */
+       unsigned int vq_ring_size;
+       uint16_t mbuf_addr_offset;
+
+       union {
+               struct virtnet_rx rxq;
+               struct virtnet_tx txq;
+               struct virtnet_ctl cq;
+       };
+
+       rte_iova_t vq_ring_mem; /**< physical address of vring,
+                                * or virtual address for virtio_user. */
+
        uint16_t  *notify_addr;
-       int configured;
        struct rte_mbuf **sw_ring;  /**< RX software ring. */
        struct vq_desc_extra vq_descx[0];
 };
@@ -206,14 +296,13 @@ struct virtqueue {
 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN        1
 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX        0x8000
 
-#define VIRTIO_NET_CTRL_MAC_ADDR_SET         1
-
 /**
  * This is the first element of the scatter-gather list.  If you don't
  * specify GSO or CSUM features, you can simply ignore the header.
  */
 struct virtio_net_hdr {
 #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1    /**< Use csum_start,csum_offset*/
+#define VIRTIO_NET_HDR_F_DATA_VALID 2    /**< Checksum is valid */
        uint8_t flags;
 #define VIRTIO_NET_HDR_GSO_NONE     0    /**< Not a GSO frame */
 #define VIRTIO_NET_HDR_GSO_TCPV4    1    /**< GSO frame, IPv4 TCP (TSO) */
@@ -240,13 +329,40 @@ struct virtio_net_hdr_mrg_rxbuf {
 #define VIRTIO_MAX_TX_INDIRECT 8
 struct virtio_tx_region {
        struct virtio_net_hdr_mrg_rxbuf tx_hdr;
-       struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
-                          __attribute__((__aligned__(16)));
+       union {
+               struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT];
+               struct vring_packed_desc
+                       tx_packed_indir[VIRTIO_MAX_TX_INDIRECT];
+       } __rte_aligned(16);
 };
 
+static inline int
+desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
+{
+       uint16_t used, avail, flags;
+
+       flags = virtqueue_fetch_flags_packed(desc, vq->hw->weak_barriers);
+       used = !!(flags & VRING_PACKED_DESC_F_USED);
+       avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
+
+       return avail == used && used == vq->vq_packed.used_wrap_counter;
+}
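A short worked example of the wrap-counter check above (a restatement of the packed-ring rules, not new behaviour):

/*
 * With used_wrap_counter == 1, the driver makes a descriptor available
 * with AVAIL=1/USED=0; the device marks it used by setting both bits to
 * its own wrap counter, so AVAIL == USED == 1 == used_wrap_counter.
 * Once the used index wraps, used_wrap_counter flips to 0 and the same
 * test matches AVAIL == USED == 0.
 */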
+
+static inline void
+vring_desc_init_packed(struct virtqueue *vq, int n)
+{
+       int i;
+       for (i = 0; i < n - 1; i++) {
+               vq->vq_packed.ring.desc[i].id = i;
+               vq->vq_descx[i].next = i + 1;
+       }
+       vq->vq_packed.ring.desc[i].id = i;
+       vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
+}
+
 /* Chain all the descriptors in the ring with an END */
 static inline void
-vring_desc_init(struct vring_desc *dp, uint16_t n)
+vring_desc_init_split(struct vring_desc *dp, uint16_t n)
 {
        uint16_t i;
 
@@ -255,10 +371,84 @@ vring_desc_init(struct vring_desc *dp, uint16_t n)
        dp[i].next = VQ_RING_DESC_CHAIN_END;
 }
 
+static inline void
+vring_desc_init_indirect_packed(struct vring_packed_desc *dp, int n)
+{
+       int i;
+       for (i = 0; i < n; i++) {
+               dp[i].id = (uint16_t)i;
+               dp[i].flags = VRING_DESC_F_WRITE;
+       }
+}
+
+/**
+ * Tell the backend not to interrupt us. Implementation for packed virtqueues.
+ */
+static inline void
+virtqueue_disable_intr_packed(struct virtqueue *vq)
+{
+       if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
+               vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
+               vq->vq_packed.ring.driver->desc_event_flags =
+                       vq->vq_packed.event_flags_shadow;
+       }
+}
+
+/**
+ * Tell the backend not to interrupt us. Implementation for split virtqueues.
+ */
+static inline void
+virtqueue_disable_intr_split(struct virtqueue *vq)
+{
+       vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
 /**
  * Tell the backend not to interrupt us.
  */
-void virtqueue_disable_intr(struct virtqueue *vq);
+static inline void
+virtqueue_disable_intr(struct virtqueue *vq)
+{
+       if (virtio_with_packed_queue(vq->hw))
+               virtqueue_disable_intr_packed(vq);
+       else
+               virtqueue_disable_intr_split(vq);
+}
+
+/**
+ * Tell the backend to interrupt us. Implementation for packed virtqueues.
+ */
+static inline void
+virtqueue_enable_intr_packed(struct virtqueue *vq)
+{
+       if (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
+               vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
+               vq->vq_packed.ring.driver->desc_event_flags =
+                       vq->vq_packed.event_flags_shadow;
+       }
+}
+
+/**
+ * Tell the backend to interrupt us. Implementation for split virtqueues.
+ */
+static inline void
+virtqueue_enable_intr_split(struct virtqueue *vq)
+{
+       vq->vq_split.ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
+}
+
+/**
+ * Tell the backend to interrupt us.
+ */
+static inline void
+virtqueue_enable_intr(struct virtqueue *vq)
+{
+       if (virtio_with_packed_queue(vq->hw))
+               virtqueue_enable_intr_packed(vq);
+       else
+               virtqueue_enable_intr_split(vq);
+}
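A minimal sketch of how an Rx interrupt path would typically pair the two wrappers above; the function name is illustrative and the burst processing in between is elided.

static inline void
example_rxq_intr_cycle(struct virtqueue *vq)
{
	/* Mask further notifications while the ring is drained. */
	virtqueue_disable_intr(vq);

	/* ... receive burst processing would run here ... */

	/* Re-arm notifications once the backlog has been consumed. */
	virtqueue_enable_intr(vq);
}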
+
 /**
  *  Dump virtqueue internal structures, for debug purpose only.
  */
@@ -266,7 +456,14 @@ void virtqueue_dump(struct virtqueue *vq);
 /**
  *  Get all mbufs to be freed.
  */
-struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);
+struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
+
+/* Flush the elements in the used ring. */
+void virtqueue_rxvq_flush(struct virtqueue *vq);
+
+int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
+
+int virtqueue_txvq_reset_packed(struct virtqueue *vq);
 
 static inline int
 virtqueue_full(const struct virtqueue *vq)
@@ -274,13 +471,71 @@ virtqueue_full(const struct virtqueue *vq)
        return vq->vq_free_cnt == 0;
 }
 
-#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
+static inline int
+virtio_get_queue_type(struct virtio_hw *hw, uint16_t vq_idx)
+{
+       if (vq_idx == hw->max_queue_pairs * 2)
+               return VTNET_CQ;
+       else if (vq_idx % 2 == 0)
+               return VTNET_RQ;
+       else
+               return VTNET_TQ;
+}
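A short worked example of the queue index layout the helper above assumes:

/*
 * With max_queue_pairs == 2: vq_idx 0 -> VTNET_RQ, 1 -> VTNET_TQ,
 * 2 -> VTNET_RQ, 3 -> VTNET_TQ and 4 (== max_queue_pairs * 2) -> VTNET_CQ.
 */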
+
+/* virtqueue_nused has a load-acquire or rte_io_rmb inside */
+static inline uint16_t
+virtqueue_nused(const struct virtqueue *vq)
+{
+       uint16_t idx;
+
+       if (vq->hw->weak_barriers) {
+       /**
+        * x86 prefers using rte_smp_rmb over __atomic_load_n as it
+        * reports slightly better performance, which comes from the
+        * branch saved by the compiler.
+        * The if and else branches are identical with the smp and io
+        * barriers both defined as compiler barriers on x86.
+        */
+#ifdef RTE_ARCH_X86_64
+               idx = vq->vq_split.ring.used->idx;
+               rte_smp_rmb();
+#else
+               idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx,
+                               __ATOMIC_ACQUIRE);
+#endif
+       } else {
+               idx = vq->vq_split.ring.used->idx;
+               rte_io_rmb();
+       }
+       return idx - vq->vq_used_cons_idx;
+}
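The uint16_t arithmetic above is wrap-safe; a quick worked example:

/*
 * If used->idx has wrapped around to 3 while vq_used_cons_idx is still
 * 65533, (uint16_t)(3 - 65533) == 6, i.e. six used entries are pending.
 */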
+
+void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
+void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);
+void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
+                         uint16_t num);
 
 static inline void
 vq_update_avail_idx(struct virtqueue *vq)
 {
-       virtio_wmb();
-       vq->vq_ring.avail->idx = vq->vq_avail_idx;
+       if (vq->hw->weak_barriers) {
+       /* x86 prefers using rte_smp_wmb over __atomic_store_n as
+        * it reports slightly better performance, which comes from
+        * the branch saved by the compiler.
+        * The if and else branches are identical with the smp and
+        * io barriers both defined as compiler barriers on x86.
+        */
+#ifdef RTE_ARCH_X86_64
+               rte_smp_wmb();
+               vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
+#else
+               __atomic_store_n(&vq->vq_split.ring.avail->idx,
+                                vq->vq_avail_idx, __ATOMIC_RELEASE);
+#endif
+       } else {
+               rte_io_wmb();
+               vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
+       }
 }
 
 static inline void
@@ -295,44 +550,403 @@ vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
         * descriptor.
         */
        avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
-       if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx))
-               vq->vq_ring.avail->ring[avail_idx] = desc_idx;
+       if (unlikely(vq->vq_split.ring.avail->ring[avail_idx] != desc_idx))
+               vq->vq_split.ring.avail->ring[avail_idx] = desc_idx;
        vq->vq_avail_idx++;
 }
 
 static inline int
 virtqueue_kick_prepare(struct virtqueue *vq)
 {
-       return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
+       /*
+        * Ensure updated avail->idx is visible to vhost before reading
+        * the used->flags.
+        */
+       virtio_mb(vq->hw->weak_barriers);
+       return !(vq->vq_split.ring.used->flags & VRING_USED_F_NO_NOTIFY);
 }
 
-static inline void
-virtqueue_notify(struct virtqueue *vq)
+static inline int
+virtqueue_kick_prepare_packed(struct virtqueue *vq)
 {
+       uint16_t flags;
+
        /*
-        * Ensure updated avail->idx is visible to host.
-        * For virtio on IA, the notificaiton is through io port operation
-        * which is a serialization instruction itself.
+        * Ensure updated data is visible to vhost before reading the flags.
         */
-       vq->hw->vtpci_ops->notify_queue(vq->hw, vq);
+       virtio_mb(vq->hw->weak_barriers);
+       flags = vq->vq_packed.ring.device->desc_event_flags;
+
+       return flags != RING_EVENT_FLAGS_DISABLE;
+}
+
+/*
+ * virtqueue_kick_prepare*() or the virtio_wmb() should be called
+ * before this function to be sure that all the data is visible to vhost.
+ */
+static inline void
+virtqueue_notify(struct virtqueue *vq)
+{
+       VIRTIO_OPS(vq->hw)->notify_queue(vq->hw, vq);
 }
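A minimal sketch of the split-ring transmit notify sequence built from the helpers above (the enqueue/burst logic around it is omitted; the function name is illustrative):

static inline void
example_kick_split(struct virtqueue *vq)
{
	/* Publish the new avail index, then check whether the device has
	 * suppressed notifications before ringing the doorbell.
	 */
	vq_update_avail_idx(vq);
	if (unlikely(virtqueue_kick_prepare(vq)))
		virtqueue_notify(vq);
}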
 
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
 #define VIRTQUEUE_DUMP(vq) do { \
        uint16_t used_idx, nused; \
-       used_idx = (vq)->vq_ring.used->idx; \
+       used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
+                                  __ATOMIC_RELAXED); \
        nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
+       if (virtio_with_packed_queue((vq)->hw)) { \
+               PMD_INIT_LOG(DEBUG, \
+               "VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
+               " cached_flags=0x%x; used_wrap_counter=%d", \
+               (vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \
+               (vq)->vq_avail_idx, (vq)->vq_packed.cached_flags, \
+               (vq)->vq_packed.used_wrap_counter); \
+               break; \
+       } \
        PMD_INIT_LOG(DEBUG, \
          "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
          " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
          " avail.flags=0x%x; used.flags=0x%x", \
-         (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
-         (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
-         (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
-         (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
+         (vq)->vq_nentries, (vq)->vq_free_cnt, nused, (vq)->vq_desc_head_idx, \
+         (vq)->vq_split.ring.avail->idx, (vq)->vq_used_cons_idx, \
+         __atomic_load_n(&(vq)->vq_split.ring.used->idx, __ATOMIC_RELAXED), \
+         (vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
 } while (0)
 #else
 #define VIRTQUEUE_DUMP(vq) do { } while (0)
 #endif
 
+/* skip the write when the value is already set, to lessen cache issues */
+#define ASSIGN_UNLESS_EQUAL(var, val) do {     \
+       typeof(var) *const var_ = &(var);       \
+       typeof(val)  const val_ = (val);        \
+       if (*var_ != val_)                      \
+               *var_ = val_;                   \
+} while (0)
+
+#define virtqueue_clear_net_hdr(hdr) do {              \
+       typeof(hdr) hdr_ = (hdr);                       \
+       ASSIGN_UNLESS_EQUAL((hdr_)->csum_start, 0);     \
+       ASSIGN_UNLESS_EQUAL((hdr_)->csum_offset, 0);    \
+       ASSIGN_UNLESS_EQUAL((hdr_)->flags, 0);          \
+       ASSIGN_UNLESS_EQUAL((hdr_)->gso_type, 0);       \
+       ASSIGN_UNLESS_EQUAL((hdr_)->gso_size, 0);       \
+       ASSIGN_UNLESS_EQUAL((hdr_)->hdr_len, 0);        \
+} while (0)
+
+static inline void
+virtqueue_xmit_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *cookie)
+{
+       uint64_t csum_l4 = cookie->ol_flags & PKT_TX_L4_MASK;
+       uint16_t o_l23_len = (cookie->ol_flags & PKT_TX_TUNNEL_MASK) ?
+                            cookie->outer_l2_len + cookie->outer_l3_len : 0;
+
+       if (cookie->ol_flags & PKT_TX_TCP_SEG)
+               csum_l4 |= PKT_TX_TCP_CKSUM;
+
+       switch (csum_l4) {
+       case PKT_TX_UDP_CKSUM:
+               hdr->csum_start = o_l23_len + cookie->l2_len + cookie->l3_len;
+               hdr->csum_offset = offsetof(struct rte_udp_hdr, dgram_cksum);
+               hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+               break;
+
+       case PKT_TX_TCP_CKSUM:
+               hdr->csum_start = o_l23_len + cookie->l2_len + cookie->l3_len;
+               hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
+               hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+               break;
+
+       default:
+               ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+               ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+               ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+               break;
+       }
+
+       /* TCP Segmentation Offload */
+       if (cookie->ol_flags & PKT_TX_TCP_SEG) {
+               hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+                       VIRTIO_NET_HDR_GSO_TCPV6 :
+                       VIRTIO_NET_HDR_GSO_TCPV4;
+               hdr->gso_size = cookie->tso_segsz;
+               hdr->hdr_len = o_l23_len + cookie->l2_len + cookie->l3_len +
+                              cookie->l4_len;
+       } else {
+               ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+               ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+               ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+       }
+}
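A short worked example of the header fields produced by the logic above (the numbers follow directly from the field arithmetic):

/*
 * A TSO IPv4/TCP mbuf with l2_len = 14, l3_len = 20, l4_len = 20,
 * tso_segsz = 1448 and no tunnel headers yields csum_start = 34,
 * csum_offset = offsetof(struct rte_tcp_hdr, cksum),
 * flags = VIRTIO_NET_HDR_F_NEEDS_CSUM, gso_type = VIRTIO_NET_HDR_GSO_TCPV4,
 * gso_size = 1448 and hdr_len = 54.
 */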
+
+static inline void
+virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
+                             uint16_t needed, int use_indirect, int can_push,
+                             int in_order)
+{
+       struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
+       struct vq_desc_extra *dxp;
+       struct virtqueue *vq = virtnet_txq_to_vq(txvq);
+       struct vring_packed_desc *start_dp, *head_dp;
+       uint16_t idx, id, head_idx, head_flags;
+       int16_t head_size = vq->hw->vtnet_hdr_size;
+       struct virtio_net_hdr *hdr;
+       uint16_t prev;
+       bool prepend_header = false;
+       uint16_t seg_num = cookie->nb_segs;
+
+       id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
+
+       dxp = &vq->vq_descx[id];
+       dxp->ndescs = needed;
+       dxp->cookie = cookie;
+
+       head_idx = vq->vq_avail_idx;
+       idx = head_idx;
+       prev = head_idx;
+       start_dp = vq->vq_packed.ring.desc;
+
+       head_dp = &vq->vq_packed.ring.desc[idx];
+       head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
+       head_flags |= vq->vq_packed.cached_flags;
+
+       if (can_push) {
+               /* prepend cannot fail, checked by caller */
+               hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
+                                             -head_size);
+               prepend_header = true;
+
+               /* if offload disabled, it is not zeroed below, do it now */
+               if (!vq->hw->has_tx_offload)
+                       virtqueue_clear_net_hdr(hdr);
+       } else if (use_indirect) {
+               /* setup tx ring slot to point to indirect
+                * descriptor list stored in reserved region.
+                *
+                * the first slot in indirect ring is already preset
+                * to point to the header in reserved region
+                */
+               start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
+                       RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);
+               start_dp[idx].len   = (seg_num + 1) *
+                       sizeof(struct vring_packed_desc);
+               /* Packed descriptor id needs to be restored when inorder. */
+               if (in_order)
+                       start_dp[idx].id = idx;
+               /* reset flags for indirect desc */
+               head_flags = VRING_DESC_F_INDIRECT;
+               head_flags |= vq->vq_packed.cached_flags;
+               hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
+
+               /* loop below will fill in rest of the indirect elements */
+               start_dp = txr[idx].tx_packed_indir;
+               idx = 1;
+       } else {
+               /* setup first tx ring slot to point to header
+                * stored in reserved region.
+                */
+               start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
+                       RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
+               start_dp[idx].len   = vq->hw->vtnet_hdr_size;
+               hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
+               idx++;
+               if (idx >= vq->vq_nentries) {
+                       idx -= vq->vq_nentries;
+                       vq->vq_packed.cached_flags ^=
+                               VRING_PACKED_DESC_F_AVAIL_USED;
+               }
+       }
+
+       if (vq->hw->has_tx_offload)
+               virtqueue_xmit_offload(hdr, cookie);
+
+       do {
+               uint16_t flags;
+
+               start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+               start_dp[idx].len  = cookie->data_len;
+               if (prepend_header) {
+                       start_dp[idx].addr -= head_size;
+                       start_dp[idx].len += head_size;
+                       prepend_header = false;
+               }
+
+               if (likely(idx != head_idx)) {
+                       flags = cookie->next ? VRING_DESC_F_NEXT : 0;
+                       flags |= vq->vq_packed.cached_flags;
+                       start_dp[idx].flags = flags;
+               }
+               prev = idx;
+               idx++;
+               if (idx >= vq->vq_nentries) {
+                       idx -= vq->vq_nentries;
+                       vq->vq_packed.cached_flags ^=
+                               VRING_PACKED_DESC_F_AVAIL_USED;
+               }
+       } while ((cookie = cookie->next) != NULL);
+
+       start_dp[prev].id = id;
+
+       if (use_indirect) {
+               idx = head_idx;
+               if (++idx >= vq->vq_nentries) {
+                       idx -= vq->vq_nentries;
+                       vq->vq_packed.cached_flags ^=
+                               VRING_PACKED_DESC_F_AVAIL_USED;
+               }
+       }
+
+       vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+       vq->vq_avail_idx = idx;
+
+       if (!in_order) {
+               vq->vq_desc_head_idx = dxp->next;
+               if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+                       vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
+       }
+
+       virtqueue_store_flags_packed(head_dp, head_flags,
+                                    vq->hw->weak_barriers);
+}
+
+static void
+vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
+{
+       struct vq_desc_extra *dxp;
+
+       dxp = &vq->vq_descx[id];
+       vq->vq_free_cnt += dxp->ndescs;
+
+       if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
+               vq->vq_desc_head_idx = id;
+       else
+               vq->vq_descx[vq->vq_desc_tail_idx].next = id;
+
+       vq->vq_desc_tail_idx = id;
+       dxp->next = VQ_RING_DESC_CHAIN_END;
+}
+
+static void
+virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, uint16_t num)
+{
+       uint16_t used_idx, id, curr_id, free_cnt = 0;
+       uint16_t size = vq->vq_nentries;
+       struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
+       struct vq_desc_extra *dxp;
+       int nb = num;
+
+       used_idx = vq->vq_used_cons_idx;
+       /* desc_is_used has a load-acquire or rte_io_rmb inside
+        * and waits for a used descriptor in the virtqueue.
+        */
+       while (nb > 0 && desc_is_used(&desc[used_idx], vq)) {
+               id = desc[used_idx].id;
+               do {
+                       curr_id = used_idx;
+                       dxp = &vq->vq_descx[used_idx];
+                       used_idx += dxp->ndescs;
+                       free_cnt += dxp->ndescs;
+                       nb -= dxp->ndescs;
+                       if (used_idx >= size) {
+                               used_idx -= size;
+                               vq->vq_packed.used_wrap_counter ^= 1;
+                       }
+                       if (dxp->cookie != NULL) {
+                               rte_pktmbuf_free(dxp->cookie);
+                               dxp->cookie = NULL;
+                       }
+               } while (curr_id != id);
+       }
+       vq->vq_used_cons_idx = used_idx;
+       vq->vq_free_cnt += free_cnt;
+}
+
+static void
+virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, uint16_t num)
+{
+       uint16_t used_idx, id;
+       uint16_t size = vq->vq_nentries;
+       struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
+       struct vq_desc_extra *dxp;
+
+       used_idx = vq->vq_used_cons_idx;
+       /* desc_is_used has a load-acquire or rte_io_rmb inside
+        * and waits for a used descriptor in the virtqueue.
+        */
+       while (num-- && desc_is_used(&desc[used_idx], vq)) {
+               id = desc[used_idx].id;
+               dxp = &vq->vq_descx[id];
+               vq->vq_used_cons_idx += dxp->ndescs;
+               if (vq->vq_used_cons_idx >= size) {
+                       vq->vq_used_cons_idx -= size;
+                       vq->vq_packed.used_wrap_counter ^= 1;
+               }
+               vq_ring_free_id_packed(vq, id);
+               if (dxp->cookie != NULL) {
+                       rte_pktmbuf_free(dxp->cookie);
+                       dxp->cookie = NULL;
+               }
+               used_idx = vq->vq_used_cons_idx;
+       }
+}
+
+/* Cleanup from completed transmits. */
+static inline void
+virtio_xmit_cleanup_packed(struct virtqueue *vq, uint16_t num, int in_order)
+{
+       if (in_order)
+               virtio_xmit_cleanup_inorder_packed(vq, num);
+       else
+               virtio_xmit_cleanup_normal_packed(vq, num);
+}
+
+static inline void
+virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
+{
+       uint16_t i, used_idx, desc_idx;
+       for (i = 0; i < num; i++) {
+               struct vring_used_elem *uep;
+               struct vq_desc_extra *dxp;
+
+               used_idx = (uint16_t)(vq->vq_used_cons_idx &
+                               (vq->vq_nentries - 1));
+               uep = &vq->vq_split.ring.used->ring[used_idx];
+
+               desc_idx = (uint16_t)uep->id;
+               dxp = &vq->vq_descx[desc_idx];
+               vq->vq_used_cons_idx++;
+               vq_ring_free_chain(vq, desc_idx);
+
+               if (dxp->cookie != NULL) {
+                       rte_pktmbuf_free(dxp->cookie);
+                       dxp->cookie = NULL;
+               }
+       }
+}
+
+/* Cleanup from completed inorder transmits. */
+static __rte_always_inline void
+virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
+{
+       uint16_t i, idx = vq->vq_used_cons_idx;
+       int16_t free_cnt = 0;
+       struct vq_desc_extra *dxp = NULL;
+
+       if (unlikely(num == 0))
+               return;
+
+       for (i = 0; i < num; i++) {
+               dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];
+               free_cnt += dxp->ndescs;
+               if (dxp->cookie != NULL) {
+                       rte_pktmbuf_free(dxp->cookie);
+                       dxp->cookie = NULL;
+               }
+       }
+
+       vq->vq_free_cnt += free_cnt;
+       vq->vq_used_cons_idx = idx;
+}
 #endif /* _VIRTQUEUE_H_ */