net/virtio: fix packed ring indirect descriptors setup
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index fedbaa3..a413632 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -10,6 +10,7 @@
 #include <rte_atomic.h>
 #include <rte_memory.h>
 #include <rte_mempool.h>
+#include <rte_net.h>
 
 #include "virtio_pci.h"
 #include "virtio_ring.h"
 
 struct rte_mbuf;
 
+#define DEFAULT_TX_FREE_THRESH 32
+#define DEFAULT_RX_FREE_THRESH 32
+
+#define VIRTIO_MBUF_BURST_SZ 64
 /*
- * Per virtio_config.h in Linux.
+ * Per virtio_ring.h in Linux.
  *     For virtio_pci on SMP, we don't need to order with respect to MMIO
  *     accesses through relaxed memory I/O windows, so smp_mb() et al are
  *     sufficient.
  *
+ *     For using virtio to talk to real devices (eg. vDPA) we do need real
+ *     barriers.
+ */
+static inline void
+virtio_mb(uint8_t weak_barriers)
+{
+       if (weak_barriers)
+               rte_smp_mb();
+       else
+               rte_mb();
+}
+
+static inline void
+virtio_rmb(uint8_t weak_barriers)
+{
+       if (weak_barriers)
+               rte_smp_rmb();
+       else
+               rte_io_rmb();
+}
+
+static inline void
+virtio_wmb(uint8_t weak_barriers)
+{
+       if (weak_barriers)
+               rte_smp_wmb();
+       else
+               rte_io_wmb();
+}
+
+static inline uint16_t
+virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,
+                             uint8_t weak_barriers)
+{
+       uint16_t flags;
+
+       if (weak_barriers) {
+/* x86 prefers using rte_smp_rmb over __atomic_load_n as it reports
+ * better performance (~1.5%), which comes from the branch saved by
+ * the compiler. The if and else branches are identical, with the smp
+ * and io barriers both defined as compiler barriers on x86.
  */
-#define virtio_mb()    rte_smp_mb()
-#define virtio_rmb()   rte_smp_rmb()
-#define virtio_wmb()   rte_smp_wmb()
+#ifdef RTE_ARCH_X86_64
+               flags = dp->flags;
+               rte_smp_rmb();
+#else
+               flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
+#endif
+       } else {
+               flags = dp->flags;
+               rte_io_rmb();
+       }
 
+       return flags;
+}
+
+static inline void
+virtqueue_store_flags_packed(struct vring_packed_desc *dp,
+                             uint16_t flags, uint8_t weak_barriers)
+{
+       if (weak_barriers) {
+/* x86 prefers using rte_smp_wmb over __atomic_store_n as it reports
+ * better performance (~1.5%), which comes from the branch saved by
+ * the compiler. The if and else branches are identical, with the smp
+ * and io barriers both defined as compiler barriers on x86.
+ */
+#ifdef RTE_ARCH_X86_64
+               rte_smp_wmb();
+               dp->flags = flags;
+#else
+               __atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
+#endif
+       } else {
+               rte_io_wmb();
+               dp->flags = flags;
+       }
+}
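
Not part of the patch itself, just an illustration of how these helpers are meant to be used: on the producer side, every other field of a vring_packed_desc is written first and the flags are stored last, so the release store publishes a fully built descriptor. A minimal sketch follows (the example_publish_one_packed() name is hypothetical; virtqueue_enqueue_xmit_packed() further down is the real user):

/* Sketch only: publish one packed descriptor. addr/len/id are written
 * with plain stores; the release store of the flags makes them visible
 * to the device last. Real code also ORs per-descriptor bits such as
 * VRING_DESC_F_NEXT or VRING_DESC_F_WRITE into the flags.
 */
static inline void
example_publish_one_packed(struct virtqueue *vq, uint16_t idx,
                           uint64_t addr, uint32_t len, uint16_t id)
{
        struct vring_packed_desc *dp = &vq->vq_packed.ring.desc[idx];

        dp->addr = addr;
        dp->len  = len;
        dp->id   = id;
        virtqueue_store_flags_packed(dp, vq->vq_packed.cached_flags,
                                     vq->hw->weak_barriers);
}
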
 #ifdef RTE_PMD_PACKET_PREFETCH
 #define rte_packet_prefetch(p)  rte_prefetch1(p)
 #else
@@ -109,8 +186,8 @@ enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
  */
 struct virtio_net_ctrl_mac {
        uint32_t entries;
-       uint8_t macs[][ETHER_ADDR_LEN];
-} __attribute__((__packed__));
+       uint8_t macs[][RTE_ETHER_ADDR_LEN];
+} __rte_packed;
 
 #define VIRTIO_NET_CTRL_MAC    1
 #define VIRTIO_NET_CTRL_MAC_TABLE_SET        0
@@ -129,10 +206,21 @@ struct virtio_net_ctrl_mac {
 #define VIRTIO_NET_CTRL_VLAN_ADD 0
 #define VIRTIO_NET_CTRL_VLAN_DEL 1
 
+/*
+ * Control link announce acknowledgement
+ *
+ * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that
+ * the driver has received the notification; the device will clear the
+ * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
+ * this command.
+ */
+#define VIRTIO_NET_CTRL_ANNOUNCE     3
+#define VIRTIO_NET_CTRL_ANNOUNCE_ACK 0
+
 struct virtio_net_ctrl_hdr {
        uint8_t class;
        uint8_t cmd;
-} __attribute__((packed));
+} __rte_packed;
 
 typedef uint8_t virtio_net_ctrl_ack;
 
@@ -150,16 +238,27 @@ struct virtio_pmd_ctrl {
 struct vq_desc_extra {
        void *cookie;
        uint16_t ndescs;
+       uint16_t next;
 };
 
 struct virtqueue {
        struct virtio_hw  *hw; /**< virtio_hw structure pointer. */
-       struct vring vq_ring;  /**< vring keeping desc, used and avail */
-       /**
-        * Last consumed descriptor in the used table,
-        * trails vq_ring.used->idx.
-        */
-       uint16_t vq_used_cons_idx;
+       union {
+               struct {
+                       /**< vring keeping desc, used and avail */
+                       struct vring ring;
+               } vq_split;
+
+               struct {
+                       /**< vring keeping descs and events */
+                       struct vring_packed ring;
+                       bool used_wrap_counter;
+                       uint16_t cached_flags; /**< cached flags for descs */
+                       uint16_t event_flags_shadow;
+               } vq_packed;
+       };
+
+       uint16_t vq_used_cons_idx; /**< last consumed descriptor */
        uint16_t vq_nentries;  /**< vring desc numbers */
        uint16_t vq_free_cnt;  /**< num of desc available */
        uint16_t vq_avail_idx; /**< sync until needed */
@@ -230,13 +329,40 @@ struct virtio_net_hdr_mrg_rxbuf {
 #define VIRTIO_MAX_TX_INDIRECT 8
 struct virtio_tx_region {
        struct virtio_net_hdr_mrg_rxbuf tx_hdr;
-       struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
-                          __attribute__((__aligned__(16)));
+       union {
+               struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT];
+               struct vring_packed_desc
+                       tx_packed_indir[VIRTIO_MAX_TX_INDIRECT];
+       } __rte_aligned(16);
 };
 
+static inline int
+desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
+{
+       uint16_t used, avail, flags;
+
+       flags = virtqueue_fetch_flags_packed(desc, vq->hw->weak_barriers);
+       used = !!(flags & VRING_PACKED_DESC_F_USED);
+       avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
+
+       return avail == used && used == vq->vq_packed.used_wrap_counter;
+}
+
+static inline void
+vring_desc_init_packed(struct virtqueue *vq, int n)
+{
+       int i;
+       for (i = 0; i < n - 1; i++) {
+               vq->vq_packed.ring.desc[i].id = i;
+               vq->vq_descx[i].next = i + 1;
+       }
+       vq->vq_packed.ring.desc[i].id = i;
+       vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
+}
+
 /* Chain all the descriptors in the ring with an END */
 static inline void
-vring_desc_init(struct vring_desc *dp, uint16_t n)
+vring_desc_init_split(struct vring_desc *dp, uint16_t n)
 {
        uint16_t i;
 
@@ -245,13 +371,70 @@ vring_desc_init(struct vring_desc *dp, uint16_t n)
        dp[i].next = VQ_RING_DESC_CHAIN_END;
 }
 
+static inline void
+vring_desc_init_indirect_packed(struct vring_packed_desc *dp, int n)
+{
+       int i;
+       for (i = 0; i < n; i++) {
+               dp[i].id = (uint16_t)i;
+               dp[i].flags = VRING_DESC_F_WRITE;
+       }
+}
+
+/**
+ * Tell the backend not to interrupt us. Implementation for packed virtqueues.
+ */
+static inline void
+virtqueue_disable_intr_packed(struct virtqueue *vq)
+{
+       if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
+               vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
+               vq->vq_packed.ring.driver->desc_event_flags =
+                       vq->vq_packed.event_flags_shadow;
+       }
+}
+
+/**
+ * Tell the backend not to interrupt us. Implementation for split virtqueues.
+ */
+static inline void
+virtqueue_disable_intr_split(struct virtqueue *vq)
+{
+       vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
 /**
  * Tell the backend not to interrupt us.
  */
 static inline void
 virtqueue_disable_intr(struct virtqueue *vq)
 {
-       vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+       if (vtpci_packed_queue(vq->hw))
+               virtqueue_disable_intr_packed(vq);
+       else
+               virtqueue_disable_intr_split(vq);
+}
+
+/**
+ * Tell the backend to interrupt. Implementation for packed virtqueues.
+ */
+static inline void
+virtqueue_enable_intr_packed(struct virtqueue *vq)
+{
+       if (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
+               vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
+               vq->vq_packed.ring.driver->desc_event_flags =
+                       vq->vq_packed.event_flags_shadow;
+       }
+}
+
+/**
+ * Tell the backend to interrupt. Implementation for split virtqueues.
+ */
+static inline void
+virtqueue_enable_intr_split(struct virtqueue *vq)
+{
+       vq->vq_split.ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
 }
 
 /**
@@ -260,7 +443,10 @@ virtqueue_disable_intr(struct virtqueue *vq)
 static inline void
 virtqueue_enable_intr(struct virtqueue *vq)
 {
-       vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
+       if (vtpci_packed_queue(vq->hw))
+               virtqueue_enable_intr_packed(vq);
+       else
+               virtqueue_enable_intr_split(vq);
 }
 
 /**
@@ -270,26 +456,86 @@ void virtqueue_dump(struct virtqueue *vq);
 /**
  *  Get all mbufs to be freed.
  */
-struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);
+struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
 
 /* Flush the elements in the used ring. */
 void virtqueue_rxvq_flush(struct virtqueue *vq);
 
+int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
+
+int virtqueue_txvq_reset_packed(struct virtqueue *vq);
+
 static inline int
 virtqueue_full(const struct virtqueue *vq)
 {
        return vq->vq_free_cnt == 0;
 }
 
-#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
+static inline int
+virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
+{
+       if (vtpci_queue_idx == hw->max_queue_pairs * 2)
+               return VTNET_CQ;
+       else if (vtpci_queue_idx % 2 == 0)
+               return VTNET_RQ;
+       else
+               return VTNET_TQ;
+}
+
+/* virtqueue_nused has a load-acquire or rte_io_rmb inside */
+static inline uint16_t
+virtqueue_nused(const struct virtqueue *vq)
+{
+       uint16_t idx;
+
+       if (vq->hw->weak_barriers) {
+       /**
+        * x86 prefers using rte_smp_rmb over __atomic_load_n as it
+        * reports slightly better performance, which comes from the
+        * branch saved by the compiler.
+        * The if and else branches are identical, with the smp and io
+        * barriers both defined as compiler barriers on x86.
+        */
+#ifdef RTE_ARCH_X86_64
+               idx = vq->vq_split.ring.used->idx;
+               rte_smp_rmb();
+#else
+               idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx,
+                               __ATOMIC_ACQUIRE);
+#endif
+       } else {
+               idx = vq->vq_split.ring.used->idx;
+               rte_io_rmb();
+       }
+       return idx - vq->vq_used_cons_idx;
+}
 
 void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
+void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);
+void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
+                         uint16_t num);
 
 static inline void
 vq_update_avail_idx(struct virtqueue *vq)
 {
-       virtio_wmb();
-       vq->vq_ring.avail->idx = vq->vq_avail_idx;
+       if (vq->hw->weak_barriers) {
+       /* x86 prefers using rte_smp_wmb over __atomic_store_n as
+        * it reports slightly better performance, which comes from
+        * the branch saved by the compiler.
+        * The if and else branches are identical, with the smp and
+        * io barriers both defined as compiler barriers on x86.
+        */
+#ifdef RTE_ARCH_X86_64
+               rte_smp_wmb();
+               vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
+#else
+               __atomic_store_n(&vq->vq_split.ring.avail->idx,
+                                vq->vq_avail_idx, __ATOMIC_RELEASE);
+#endif
+       } else {
+               rte_io_wmb();
+               vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
+       }
 }
 
 static inline void
@@ -304,44 +550,371 @@ vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
         * descriptor.
         */
        avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
-       if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx))
-               vq->vq_ring.avail->ring[avail_idx] = desc_idx;
+       if (unlikely(vq->vq_split.ring.avail->ring[avail_idx] != desc_idx))
+               vq->vq_split.ring.avail->ring[avail_idx] = desc_idx;
        vq->vq_avail_idx++;
 }
 
 static inline int
 virtqueue_kick_prepare(struct virtqueue *vq)
 {
-       return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
+       /*
+        * Ensure updated avail->idx is visible to vhost before reading
+        * the used->flags.
+        */
+       virtio_mb(vq->hw->weak_barriers);
+       return !(vq->vq_split.ring.used->flags & VRING_USED_F_NO_NOTIFY);
 }
 
-static inline void
-virtqueue_notify(struct virtqueue *vq)
+static inline int
+virtqueue_kick_prepare_packed(struct virtqueue *vq)
 {
+       uint16_t flags;
+
        /*
-        * Ensure updated avail->idx is visible to host.
-        * For virtio on IA, the notificaiton is through io port operation
-        * which is a serialization instruction itself.
+        * Ensure updated data is visible to vhost before reading the flags.
         */
+       virtio_mb(vq->hw->weak_barriers);
+       flags = vq->vq_packed.ring.device->desc_event_flags;
+
+       return flags != RING_EVENT_FLAGS_DISABLE;
+}
+
+/*
+ * virtqueue_kick_prepare*() or virtio_wmb() should be called before
+ * this function, to be sure that all the data is visible to vhost.
+ */
+static inline void
+virtqueue_notify(struct virtqueue *vq)
+{
        VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
 }
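
Not part of the patch, but to show the ordering the comment above demands: for a split queue the usual transmit tail, after the descriptor chain has been written, is to publish the avail index and only then read the notification-suppression flag. A minimal sketch (the example_submit_split() name is hypothetical):

/* Sketch only: publish a chain whose head descriptor index is head_idx
 * and notify the device unless it asked not to be notified.
 */
static inline void
example_submit_split(struct virtqueue *vq, uint16_t head_idx)
{
        vq_update_avail_ring(vq, head_idx); /* expose the chain head */
        vq_update_avail_idx(vq);            /* barrier + avail->idx store */

        if (virtqueue_kick_prepare(vq))     /* full barrier, then check flag */
                virtqueue_notify(vq);       /* ring the doorbell */
}
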
 
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
 #define VIRTQUEUE_DUMP(vq) do { \
        uint16_t used_idx, nused; \
-       used_idx = (vq)->vq_ring.used->idx; \
+       used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
+                                  __ATOMIC_RELAXED); \
        nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
+       if (vtpci_packed_queue((vq)->hw)) { \
+               PMD_INIT_LOG(DEBUG, \
+               "VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
+               " cached_flags=0x%x; used_wrap_counter=%d", \
+               (vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \
+               (vq)->vq_avail_idx, (vq)->vq_packed.cached_flags, \
+               (vq)->vq_packed.used_wrap_counter); \
+               break; \
+       } \
        PMD_INIT_LOG(DEBUG, \
          "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
          " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
          " avail.flags=0x%x; used.flags=0x%x", \
-         (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
-         (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
-         (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
-         (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
+         (vq)->vq_nentries, (vq)->vq_free_cnt, nused, (vq)->vq_desc_head_idx, \
+         (vq)->vq_split.ring.avail->idx, (vq)->vq_used_cons_idx, \
+         __atomic_load_n(&(vq)->vq_split.ring.used->idx, __ATOMIC_RELAXED), \
+         (vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
 } while (0)
 #else
 #define VIRTQUEUE_DUMP(vq) do { } while (0)
 #endif
 
+/* skip the write when the value is already set, to lessen cache pressure */
+#define ASSIGN_UNLESS_EQUAL(var, val) do {     \
+       typeof(var) *const var_ = &(var);       \
+       typeof(val)  const val_ = (val);        \
+       if (*var_ != val_)                      \
+               *var_ = val_;                   \
+} while (0)
+
+#define virtqueue_clear_net_hdr(hdr) do {              \
+       typeof(hdr) hdr_ = (hdr);                       \
+       ASSIGN_UNLESS_EQUAL((hdr_)->csum_start, 0);     \
+       ASSIGN_UNLESS_EQUAL((hdr_)->csum_offset, 0);    \
+       ASSIGN_UNLESS_EQUAL((hdr_)->flags, 0);          \
+       ASSIGN_UNLESS_EQUAL((hdr_)->gso_type, 0);       \
+       ASSIGN_UNLESS_EQUAL((hdr_)->gso_size, 0);       \
+       ASSIGN_UNLESS_EQUAL((hdr_)->hdr_len, 0);        \
+} while (0)
+
+static inline void
+virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
+                       struct rte_mbuf *cookie,
+                       bool offload)
+{
+       if (offload) {
+               if (cookie->ol_flags & PKT_TX_TCP_SEG)
+                       cookie->ol_flags |= PKT_TX_TCP_CKSUM;
+
+               switch (cookie->ol_flags & PKT_TX_L4_MASK) {
+               case PKT_TX_UDP_CKSUM:
+                       hdr->csum_start = cookie->l2_len + cookie->l3_len;
+                       hdr->csum_offset = offsetof(struct rte_udp_hdr,
+                               dgram_cksum);
+                       hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+                       break;
+
+               case PKT_TX_TCP_CKSUM:
+                       hdr->csum_start = cookie->l2_len + cookie->l3_len;
+                       hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
+                       hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+                       break;
+
+               default:
+                       ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+                       ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+                       ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+                       break;
+               }
+
+               /* TCP Segmentation Offload */
+               if (cookie->ol_flags & PKT_TX_TCP_SEG) {
+                       hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+                               VIRTIO_NET_HDR_GSO_TCPV6 :
+                               VIRTIO_NET_HDR_GSO_TCPV4;
+                       hdr->gso_size = cookie->tso_segsz;
+                       hdr->hdr_len =
+                               cookie->l2_len +
+                               cookie->l3_len +
+                               cookie->l4_len;
+               } else {
+                       ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+                       ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+                       ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+               }
+       }
+}
+
+static inline void
+virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
+                             uint16_t needed, int can_push, int in_order)
+{
+       struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
+       struct vq_desc_extra *dxp;
+       struct virtqueue *vq = txvq->vq;
+       struct vring_packed_desc *start_dp, *head_dp;
+       uint16_t idx, id, head_idx, head_flags;
+       int16_t head_size = vq->hw->vtnet_hdr_size;
+       struct virtio_net_hdr *hdr;
+       uint16_t prev;
+       bool prepend_header = false;
+
+       id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
+
+       dxp = &vq->vq_descx[id];
+       dxp->ndescs = needed;
+       dxp->cookie = cookie;
+
+       head_idx = vq->vq_avail_idx;
+       idx = head_idx;
+       prev = head_idx;
+       start_dp = vq->vq_packed.ring.desc;
+
+       head_dp = &vq->vq_packed.ring.desc[idx];
+       head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
+       head_flags |= vq->vq_packed.cached_flags;
+
+       if (can_push) {
+               /* prepend cannot fail, checked by caller */
+               hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
+                                             -head_size);
+               prepend_header = true;
+
+               /* if offload disabled, it is not zeroed below, do it now */
+               if (!vq->hw->has_tx_offload)
+                       virtqueue_clear_net_hdr(hdr);
+       } else {
+               /* setup first tx ring slot to point to header
+                * stored in reserved region.
+                */
+               start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
+                       RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
+               start_dp[idx].len   = vq->hw->vtnet_hdr_size;
+               hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
+               idx++;
+               if (idx >= vq->vq_nentries) {
+                       idx -= vq->vq_nentries;
+                       vq->vq_packed.cached_flags ^=
+                               VRING_PACKED_DESC_F_AVAIL_USED;
+               }
+       }
+
+       virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+
+       do {
+               uint16_t flags;
+
+               start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+               start_dp[idx].len  = cookie->data_len;
+               if (prepend_header) {
+                       start_dp[idx].addr -= head_size;
+                       start_dp[idx].len += head_size;
+                       prepend_header = false;
+               }
+
+               if (likely(idx != head_idx)) {
+                       flags = cookie->next ? VRING_DESC_F_NEXT : 0;
+                       flags |= vq->vq_packed.cached_flags;
+                       start_dp[idx].flags = flags;
+               }
+               prev = idx;
+               idx++;
+               if (idx >= vq->vq_nentries) {
+                       idx -= vq->vq_nentries;
+                       vq->vq_packed.cached_flags ^=
+                               VRING_PACKED_DESC_F_AVAIL_USED;
+               }
+       } while ((cookie = cookie->next) != NULL);
+
+       start_dp[prev].id = id;
+
+       vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+       vq->vq_avail_idx = idx;
+
+       if (!in_order) {
+               vq->vq_desc_head_idx = dxp->next;
+               if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+                       vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
+       }
+
+       virtqueue_store_flags_packed(head_dp, head_flags,
+                                    vq->hw->weak_barriers);
+}
+
+static void
+vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
+{
+       struct vq_desc_extra *dxp;
+
+       dxp = &vq->vq_descx[id];
+       vq->vq_free_cnt += dxp->ndescs;
+
+       if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
+               vq->vq_desc_head_idx = id;
+       else
+               vq->vq_descx[vq->vq_desc_tail_idx].next = id;
+
+       vq->vq_desc_tail_idx = id;
+       dxp->next = VQ_RING_DESC_CHAIN_END;
+}
+
+static void
+virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
+{
+       uint16_t used_idx, id, curr_id, free_cnt = 0;
+       uint16_t size = vq->vq_nentries;
+       struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
+       struct vq_desc_extra *dxp;
+
+       used_idx = vq->vq_used_cons_idx;
+       /* desc_is_used has a load-acquire or rte_io_rmb inside,
+        * so it is safe to poll here for used descriptors.
+        */
+       while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
+               id = desc[used_idx].id;
+               do {
+                       curr_id = used_idx;
+                       dxp = &vq->vq_descx[used_idx];
+                       used_idx += dxp->ndescs;
+                       free_cnt += dxp->ndescs;
+                       num -= dxp->ndescs;
+                       if (used_idx >= size) {
+                               used_idx -= size;
+                               vq->vq_packed.used_wrap_counter ^= 1;
+                       }
+                       if (dxp->cookie != NULL) {
+                               rte_pktmbuf_free(dxp->cookie);
+                               dxp->cookie = NULL;
+                       }
+               } while (curr_id != id);
+       }
+       vq->vq_used_cons_idx = used_idx;
+       vq->vq_free_cnt += free_cnt;
+}
+
+static void
+virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
+{
+       uint16_t used_idx, id;
+       uint16_t size = vq->vq_nentries;
+       struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
+       struct vq_desc_extra *dxp;
+
+       used_idx = vq->vq_used_cons_idx;
+       /* desc_is_used has a load-acquire or rte_io_rmb inside,
+        * so it is safe to poll here for used descriptors.
+        */
+       while (num-- && desc_is_used(&desc[used_idx], vq)) {
+               id = desc[used_idx].id;
+               dxp = &vq->vq_descx[id];
+               vq->vq_used_cons_idx += dxp->ndescs;
+               if (vq->vq_used_cons_idx >= size) {
+                       vq->vq_used_cons_idx -= size;
+                       vq->vq_packed.used_wrap_counter ^= 1;
+               }
+               vq_ring_free_id_packed(vq, id);
+               if (dxp->cookie != NULL) {
+                       rte_pktmbuf_free(dxp->cookie);
+                       dxp->cookie = NULL;
+               }
+               used_idx = vq->vq_used_cons_idx;
+       }
+}
+
+/* Cleanup from completed transmits. */
+static inline void
+virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order)
+{
+       if (in_order)
+               virtio_xmit_cleanup_inorder_packed(vq, num);
+       else
+               virtio_xmit_cleanup_normal_packed(vq, num);
+}
+
+static inline void
+virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
+{
+       uint16_t i, used_idx, desc_idx;
+       for (i = 0; i < num; i++) {
+               struct vring_used_elem *uep;
+               struct vq_desc_extra *dxp;
+
+               used_idx = (uint16_t)(vq->vq_used_cons_idx &
+                               (vq->vq_nentries - 1));
+               uep = &vq->vq_split.ring.used->ring[used_idx];
+
+               desc_idx = (uint16_t)uep->id;
+               dxp = &vq->vq_descx[desc_idx];
+               vq->vq_used_cons_idx++;
+               vq_ring_free_chain(vq, desc_idx);
+
+               if (dxp->cookie != NULL) {
+                       rte_pktmbuf_free(dxp->cookie);
+                       dxp->cookie = NULL;
+               }
+       }
+}
+
+/* Cleanup from completed inorder transmits. */
+static __rte_always_inline void
+virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
+{
+       uint16_t i, idx = vq->vq_used_cons_idx;
+       int16_t free_cnt = 0;
+       struct vq_desc_extra *dxp = NULL;
+
+       if (unlikely(num == 0))
+               return;
+
+       for (i = 0; i < num; i++) {
+               dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];
+               free_cnt += dxp->ndescs;
+               if (dxp->cookie != NULL) {
+                       rte_pktmbuf_free(dxp->cookie);
+                       dxp->cookie = NULL;
+               }
+       }
+
+       vq->vq_free_cnt += free_cnt;
+       vq->vq_used_cons_idx = idx;
+}
 #endif /* _VIRTQUEUE_H_ */