return 0;
}
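+/*
+ * Zero-copy batch dequeue: hand guest buffers straight to the mbufs and
+ * defer marking the descriptors used until the application frees them.
+ */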
+static __rte_unused int
+virtio_dev_tx_batch_packed_zmbuf(struct virtio_net *dev,
+				 struct vhost_virtqueue *vq,
+				 struct rte_mempool *mbuf_pool,
+				 struct rte_mbuf **pkts)
+{
+	struct zcopy_mbuf *zmbufs[PACKED_BATCH_SIZE];
+	uintptr_t desc_addrs[PACKED_BATCH_SIZE];
+	uint16_t ids[PACKED_BATCH_SIZE];
+	uint16_t i;
+
+	uint16_t avail_idx = vq->last_avail_idx;
+
+	if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
+					     avail_idx, desc_addrs, ids))
+		return -1;
+
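+	/* take one zmbuf per packet to track the in-flight guest buffer */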
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+		zmbufs[i] = get_zmbuf(vq);
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+		if (!zmbufs[i])
+			goto free_pkt;
+	}
+
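+	/* bind each mbuf to the descriptor it borrows */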
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+		zmbufs[i]->mbuf = pkts[i];
+		/*
+		 * Record the driver-assigned buffer id, not the ring index:
+		 * the id is what must be written back to the used descriptor
+		 * once the mbuf is consumed.
+		 */
+		zmbufs[i]->desc_idx = ids[i];
+		zmbufs[i]->desc_count = 1;
+	}
+
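+	/*
+	 * The extra reference keeps the mbuf alive after the application
+	 * frees it, so free_zmbuf() can detect consumption and do the
+	 * real free.
+	 */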
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+		rte_mbuf_refcnt_update(pkts[i], 1);
+
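+	/* completion is deferred: park the zmbufs until the mbufs are consumed */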
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+		TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbufs[i], next);
+
+	vq->nr_zmbuf += PACKED_BATCH_SIZE;
+	vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
+
+	return 0;
+
+free_pkt:
+	/* return any zmbufs taken above before dropping the packets */
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+		if (zmbufs[i])
+			put_zmbuf(zmbufs[i]);
+		rte_pktmbuf_free(pkts[i]);
+	}
+
+	return -1;
+}
+
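+/*
+ * Zero-copy dequeue of a single descriptor chain; the used-ring update
+ * is likewise deferred to free_zmbuf().
+ */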
+static __rte_unused int
+virtio_dev_tx_single_packed_zmbuf(struct virtio_net *dev,
+				  struct vhost_virtqueue *vq,
+				  struct rte_mempool *mbuf_pool,
+				  struct rte_mbuf **pkts)
+{
+	uint16_t buf_id, desc_count;
+	struct zcopy_mbuf *zmbuf;
+
+	if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
+					&desc_count))
+		return -1;
+
+	zmbuf = get_zmbuf(vq);
+	if (!zmbuf) {
+		rte_pktmbuf_free(*pkts);
+		return -1;
+	}
+	zmbuf->mbuf = *pkts;
+	/* store the buffer id, not the ring index, for the used-ring write */
+	zmbuf->desc_idx = buf_id;
+	zmbuf->desc_count = desc_count;
+
+	rte_mbuf_refcnt_update(*pkts, 1);
+
+	vq->nr_zmbuf += 1;
+	TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
+
+	vq_inc_last_avail_packed(vq, desc_count);
+	return 0;
+}
+
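+/*
+ * Walk the zero-copy list; for every mbuf the application has finished
+ * with, write back its used descriptor and recycle the zmbuf.
+ */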
+static __rte_unused void
+free_zmbuf(struct vhost_virtqueue *vq)
+{
+	struct zcopy_mbuf *next = NULL;
+	struct zcopy_mbuf *zmbuf;
+
+	for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
+	     zmbuf != NULL; zmbuf = next) {
+		next = TAILQ_NEXT(zmbuf, next);
+
+		uint16_t last_used_idx = vq->last_used_idx;
+
+		if (mbuf_is_consumed(zmbuf->mbuf)) {
+			uint16_t flags;
+
+			flags = vq->desc_packed[last_used_idx].flags;
+			if (vq->used_wrap_counter) {
+				flags |= VRING_DESC_F_USED;
+				flags |= VRING_DESC_F_AVAIL;
+			} else {
+				flags &= ~VRING_DESC_F_USED;
+				flags &= ~VRING_DESC_F_AVAIL;
+			}
+
+			vq->desc_packed[last_used_idx].id = zmbuf->desc_idx;
+			vq->desc_packed[last_used_idx].len = 0;
+
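+			/* make id/len visible before flipping the flags */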
+			rte_smp_wmb();
+			vq->desc_packed[last_used_idx].flags = flags;
+
+			vq_inc_last_used_packed(vq, zmbuf->desc_count);
+
+			TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+			restore_mbuf(zmbuf->mbuf);
+			rte_pktmbuf_free(zmbuf->mbuf);
+			put_zmbuf(zmbuf);
+			vq->nr_zmbuf -= 1;
+		}
+	}
+}
+
static __rte_noinline uint16_t
virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)