+
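+ /* Save the first descriptor's flags and write them last, so the
+  * guest cannot observe a partially updated descriptor chain.
+  */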
+ if (i > 0) {
+ vq->desc_packed[vq->last_used_idx].flags = flags;
+ } else {
+ head_idx = vq->last_used_idx;
+ head_flags = flags;
+ }
+
+ vq_inc_last_used_packed(vq, async->buffers_packed[from].count);
+
+ from++;
+ if (from == vq->size)
+ from = 0;
+ }
+
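+ /* Expose the whole batch to the guest in one step: the head
+  * descriptor's flags are flipped only after all other
+  * descriptors have been updated.
+  */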
+ vq->desc_packed[head_idx].flags = head_flags;
+ async->last_buffer_idx_packed = from;
+}
+
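+ /* Poll the async channel for finished copies, return the
+  * completed packets to the caller and write the results back
+  * to the virtqueue.
+  */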
+static __rte_always_inline uint16_t
+vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
+ struct vhost_async *async = vq->async;
+ struct async_inflight_info *pkts_info = async->pkts_info;
+ int32_t n_cpl;
+ uint16_t n_descs = 0, n_buffers = 0;
+ uint16_t start_idx, from, i;
+
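+ /* Ask the application's async channel how many in-flight
+  * copies have completed since the last poll.
+  */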
+ n_cpl = async->ops.check_completed_copies(dev->vid, queue_id, 0, count);
+ if (unlikely(n_cpl < 0)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: failed to check completed copies for queue id %d.\n",
+ dev->ifname, __func__, queue_id);
+ return 0;
+ }
+
+ if (n_cpl == 0)
+ return 0;
+
+ start_idx = async_get_first_inflight_pkt_idx(vq);
+
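+ /* Collect the completed mbufs and count the ring entries they
+  * consumed: buffers for packed rings, descriptors for split
+  * rings.
+  */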
+ for (i = 0; i < n_cpl; i++) {
+ from = (start_idx + i) % vq->size;
+ /* Only used with packed ring */
+ n_buffers += pkts_info[from].nr_buffers;
+ /* Only used with split ring */
+ n_descs += pkts_info[from].descs;
+ pkts[i] = pkts_info[from].mbuf;
+ }
+
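+ /* These copies are done, so they no longer count as in flight. */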
+ async->pkts_inflight_n -= n_cpl;
+
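+ /* With the ring accessible, write back the completions and
+  * notify the guest; otherwise only advance the shadow indexes
+  * and defer the write-back until the ring is available again.
+  */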
+ if (likely(vq->enabled && vq->access_ok)) {
+ if (vq_is_packed(dev)) {
+ write_back_completed_descs_packed(vq, n_buffers);
+ vhost_vring_call_packed(dev, vq);
+ } else {
+ write_back_completed_descs_split(vq, n_descs);
+ __atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
+ vhost_vring_call_split(dev, vq);
+ }
+ } else {
+ if (vq_is_packed(dev)) {
+ async->last_buffer_idx_packed += n_buffers;
+ if (async->last_buffer_idx_packed >= vq->size)
+ async->last_buffer_idx_packed -= vq->size;
+ } else {
+ async->last_desc_idx_split += n_descs;
+ }
+ }
+
+ return n_cpl;