+ vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
+ sizeof(struct vring_packed_desc),
+ sizeof(struct vring_packed_desc));
+ vq->shadow_used_idx = 0;
+ vhost_log_cache_sync(dev, vq);
+}
+
+/*
+ * Write back one full batch (PACKED_BATCH_SIZE entries) of used
+ * descriptors for the enqueue path of a packed virtqueue.
+ *
+ * id/len are stored for every descriptor first, then a write barrier,
+ * then the flags words: the guest discovers used descriptors by
+ * polling flags, so flags must not become visible before id/len.
+ *
+ * @param dev   vhost device owning the virtqueue
+ * @param vq    packed virtqueue; last_used_idx is advanced by the batch
+ * @param lens  per-descriptor used lengths, PACKED_BATCH_SIZE entries
+ * @param ids   per-descriptor buffer ids, PACKED_BATCH_SIZE entries
+ */
+static __rte_always_inline void
+vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ uint64_t *lens,
+ uint16_t *ids)
+{
+ uint16_t i;
+ uint16_t flags;
+
+ /* AVAIL/USED bit pattern depends on the current used wrap counter. */
+ flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ vq->desc_packed[vq->last_used_idx + i].id = ids[i];
+ vq->desc_packed[vq->last_used_idx + i].len = lens[i];
+ }
+
+ /* Make id/len stores visible before flipping any flags word. */
+ rte_smp_wmb();
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ vq->desc_packed[vq->last_used_idx + i].flags = flags;
+
+ /*
+ * Mark the whole written descriptor range dirty (dirty-page
+ * logging, presumably for live migration -- confirm against the
+ * vhost_log_cache_* helpers) and sync the log cache.
+ */
+ vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
+ sizeof(struct vring_packed_desc),
+ sizeof(struct vring_packed_desc) *
+ PACKED_BATCH_SIZE);
+ vhost_log_cache_sync(dev, vq);
+
+ vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
+}
+
+/*
+ * Record completion of a dequeued batch when in-order processing is
+ * negotiated: a single shadow used entry (count = 1, len = 0) carrying
+ * one buffer id stands in for the whole batch -- presumably the guest
+ * reclaims the preceding buffers implicitly under in-order semantics
+ * (TODO confirm against the flush path).
+ *
+ * The id is refreshed unconditionally; the remaining fields and the
+ * flags (derived from the current used wrap counter) are filled in
+ * only when no shadow entry is pending yet, so shadow_last_used_idx
+ * keeps pointing at the first un-flushed slot.
+ */
+static __rte_always_inline void
+vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
+ uint16_t id)
+{
+ vq->shadow_used_packed[0].id = id;
+
+ if (!vq->shadow_used_idx) {
+ vq->shadow_last_used_idx = vq->last_used_idx;
+ vq->shadow_used_packed[0].flags =
+ PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
+ vq->shadow_used_packed[0].len = 0;
+ vq->shadow_used_packed[0].count = 1;
+ vq->shadow_used_idx++;
+ }
+
+ /* The ring index still advances by the full batch. */
+ vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
+}
+
+/*
+ * Record completion of a dequeued batch of PACKED_BATCH_SIZE
+ * descriptors (non in-order path).
+ *
+ * When no shadow entry is pending, the first descriptor of the batch
+ * is parked in shadow_used_packed[0] instead of being written to the
+ * ring, deferring its flags update until the shadow flush -- presumably
+ * so the guest cannot observe later descriptors as used before the
+ * earliest one is finalized (confirm against the flush helper).  The
+ * remaining descriptors are written directly: id/len first, write
+ * barrier, then flags, since the guest polls the flags word.
+ *
+ * @param dev  vhost device owning the virtqueue
+ * @param vq   packed virtqueue; last_used_idx is advanced by the batch
+ * @param ids  per-descriptor buffer ids, PACKED_BATCH_SIZE entries
+ */
+static __rte_always_inline void
+vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ uint16_t *ids)
+{
+ uint16_t flags;
+ uint16_t i;
+ uint16_t begin;
+
+ /* AVAIL/USED bit pattern for the current used wrap counter. */
+ flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
+
+ if (!vq->shadow_used_idx) {
+ /* Park the batch's first descriptor in the shadow ring. */
+ vq->shadow_last_used_idx = vq->last_used_idx;
+ vq->shadow_used_packed[0].id = ids[0];
+ vq->shadow_used_packed[0].len = 0;
+ vq->shadow_used_packed[0].count = 1;
+ vq->shadow_used_packed[0].flags = flags;
+ vq->shadow_used_idx++;
+ begin = 1;
+ } else
+ begin = 0;
+
+ /* Direct write-back for the rest of the batch: id/len before flags. */
+ vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
+ vq->desc_packed[vq->last_used_idx + i].id = ids[i];
+ vq->desc_packed[vq->last_used_idx + i].len = 0;
+ }
+
+ /* Make id/len stores visible before flipping any flags word. */
+ rte_smp_wmb();
+ vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
+ vq->desc_packed[vq->last_used_idx + i].flags = flags;
+
+ /*
+ * Mark the batch's descriptor range dirty (dirty-page logging,
+ * presumably for live migration -- confirm) and sync the log cache.
+ */
+ vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
+ sizeof(struct vring_packed_desc),
+ sizeof(struct vring_packed_desc) *
+ PACKED_BATCH_SIZE);
+ vhost_log_cache_sync(dev, vq);
+
+ vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
+}
+
+/*
+ * Record completion of a single dequeued descriptor chain.
+ *
+ * The flags word is rebuilt from the descriptor currently at
+ * last_used_idx, with only the AVAIL/USED bits set or cleared together
+ * according to the used wrap counter; every other flag bit is
+ * preserved as-is.
+ *
+ * The very first completion (empty shadow) is parked in
+ * shadow_used_packed[0] with its ring write deferred to the shadow
+ * flush; later completions are written straight into the descriptor
+ * ring.  NOTE(review): the direct-write branch updates id, len and
+ * flags with no write barrier between them -- presumably safe because
+ * the guest cannot consume these entries until the earlier shadowed
+ * entry's flags are flushed; confirm against the flush helper.
+ *
+ * @param vq      packed virtqueue to update
+ * @param buf_id  buffer id of the completed descriptor chain
+ * @param count   number of ring slots the chain occupied; last_used_idx
+ *                advances by this amount
+ */
+static __rte_always_inline void
+vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
+ uint16_t buf_id,
+ uint16_t count)
+{
+ uint16_t flags;
+
+ flags = vq->desc_packed[vq->last_used_idx].flags;
+ if (vq->used_wrap_counter) {
+ flags |= VRING_DESC_F_USED;
+ flags |= VRING_DESC_F_AVAIL;
+ } else {
+ flags &= ~VRING_DESC_F_USED;
+ flags &= ~VRING_DESC_F_AVAIL;
+ }
+
+ if (!vq->shadow_used_idx) {
+ /* First completion: defer the ring write via the shadow entry. */
+ vq->shadow_last_used_idx = vq->last_used_idx;
+
+ vq->shadow_used_packed[0].id = buf_id;
+ vq->shadow_used_packed[0].len = 0;
+ vq->shadow_used_packed[0].flags = flags;
+ vq->shadow_used_idx++;
+ } else {
+ /* A shadow entry is already pending: write the ring directly. */
+ vq->desc_packed[vq->last_used_idx].id = buf_id;
+ vq->desc_packed[vq->last_used_idx].len = 0;
+ vq->desc_packed[vq->last_used_idx].flags = flags;
+ }
+
+ vq_inc_last_used_packed(vq, count);
+}
+
+static __rte_always_inline void
+vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
+ uint16_t buf_id,
+ uint16_t count)
+{
+ uint16_t flags;
+
+ vq->shadow_used_packed[0].id = buf_id;
+
+ flags = vq->desc_packed[vq->last_used_idx].flags;
+ if (vq->used_wrap_counter) {
+ flags |= VRING_DESC_F_USED;
+ flags |= VRING_DESC_F_AVAIL;
+ } else {
+ flags &= ~VRING_DESC_F_USED;
+ flags &= ~VRING_DESC_F_AVAIL;
+ }
+
+ if (!vq->shadow_used_idx) {
+ vq->shadow_last_used_idx = vq->last_used_idx;
+ vq->shadow_used_packed[0].len = 0;
+ vq->shadow_used_packed[0].flags = flags;
+ vq->shadow_used_idx++;
+ }
+
+ vq_inc_last_used_packed(vq, count);