+static __rte_always_inline void
+vhost_update_used_packed(struct vhost_virtqueue *vq,
+ struct vring_used_elem_packed *shadow_ring,
+ uint16_t count)
+{
+ int i;
+ uint16_t used_idx = vq->last_used_idx;
+ uint16_t head_idx = vq->last_used_idx;
+ uint16_t head_flags = 0;
+
+ if (count == 0)
+ return;
+
+ /* Split loop in two to save memory barriers */
+ for (i = 0; i < count; i++) {
+ vq->desc_packed[used_idx].id = shadow_ring[i].id;
+ vq->desc_packed[used_idx].len = shadow_ring[i].len;
+
+ used_idx += shadow_ring[i].count;
+ if (used_idx >= vq->size)
+ used_idx -= vq->size;
+ }
+
+ /* The ordering for storing desc flags needs to be enforced. */
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
+
+ for (i = 0; i < count; i++) {
+ uint16_t flags;
+
+ if (shadow_ring[i].len)
+ flags = VRING_DESC_F_WRITE;
+ else
+ flags = 0;
+
+ if (vq->used_wrap_counter) {
+ flags |= VRING_DESC_F_USED;
+ flags |= VRING_DESC_F_AVAIL;
+ } else {
+ flags &= ~VRING_DESC_F_USED;
+ flags &= ~VRING_DESC_F_AVAIL;
+ }
+
+ if (i > 0) {
+ vq->desc_packed[vq->last_used_idx].flags = flags;
+ } else {
+ head_idx = vq->last_used_idx;
+ head_flags = flags;
+ }
+
+ vq_inc_last_used_packed(vq, shadow_ring[i].count);
+ }
+
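+ /*
+ * Write the head descriptor's flags last so the whole chain
+ * becomes visible to the driver at once.
+ */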
+ vq->desc_packed[head_idx].flags = head_flags;
+}
+
+static __rte_always_inline int
+virtio_dev_rx_async_batch_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ struct rte_mbuf **pkts,
+ struct rte_mbuf **comp_pkts, uint32_t *pkt_done)
+{
+ uint16_t i;
+ uint32_t cpy_threshold = vq->async_threshold;
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ if (unlikely(pkts[i]->pkt_len >= cpy_threshold))
+ return -1;
+ }
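+ /*
+ * All packets in the batch are below the async copy threshold:
+ * enqueue them with the sync batch path and report them as
+ * completed right away.
+ */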
+ if (!virtio_dev_rx_batch_packed(dev, vq, pkts)) {
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ comp_pkts[(*pkt_done)++] = pkts[i];
+
+ return 0;
+ }
+
+ return -1;
+}
+
+static __rte_always_inline int
+vhost_enqueue_async_single_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ struct rte_mbuf *pkt,
+ struct buf_vector *buf_vec,
+ uint16_t *nr_descs,
+ uint16_t *nr_buffers,
+ struct vring_packed_desc *async_descs,
+ struct iovec *src_iovec, struct iovec *dst_iovec,
+ struct rte_vhost_iov_iter *src_it,
+ struct rte_vhost_iov_iter *dst_it)
+{
+ uint16_t nr_vec = 0;
+ uint16_t avail_idx = vq->last_avail_idx;
+ uint16_t max_tries, tries = 0;
+ uint16_t buf_id = 0;
+ uint32_t len = 0;
+ uint16_t desc_count = 0;
+ uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ uint32_t buffer_len[vq->size];
+ uint16_t buffer_buf_id[vq->size];
+ uint16_t buffer_desc_count[vq->size];
+
+ if (rxvq_is_mergeable(dev))
+ max_tries = vq->size - 1;
+ else
+ max_tries = 1;
+
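+ /*
+ * Reserve enough buffers from the avail ring to hold the whole
+ * packet; non-mergeable rings get a single try.
+ */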
+ while (size > 0) {
+ /*
+ * If we have tried all available ring items and still cannot
+ * get enough buffers, something abnormal has happened.
+ */
+ if (unlikely(++tries > max_tries))
+ return -1;
+
+ if (unlikely(fill_vec_buf_packed(dev, vq, avail_idx, &desc_count, buf_vec, &nr_vec,
+ &buf_id, &len, VHOST_ACCESS_RW) < 0))
+ return -1;
+
+ len = RTE_MIN(len, size);
+ size -= len;
+
+ buffer_len[*nr_buffers] = len;
+ buffer_buf_id[*nr_buffers] = buf_id;
+ buffer_desc_count[*nr_buffers] = desc_count;
+ *nr_buffers += 1;
+
+ *nr_descs += desc_count;
+ avail_idx += desc_count;
+ if (avail_idx >= vq->size)
+ avail_idx -= vq->size;
+ }
+
+ if (async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers, src_iovec, dst_iovec,
+ src_it, dst_it) < 0)
+ return -1;
+ /*
+ * Back up the descriptors consumed by this packet so they can
+ * be restored if the DMA transfer fails later.
+ */
+ if (avail_idx >= *nr_descs) {
+ rte_memcpy(async_descs, &vq->desc_packed[vq->last_avail_idx],
+ *nr_descs * sizeof(struct vring_packed_desc));
+ } else {
+ uint16_t nr_copy = vq->size - vq->last_avail_idx;
+
+ rte_memcpy(async_descs, &vq->desc_packed[vq->last_avail_idx],
+ nr_copy * sizeof(struct vring_packed_desc));
+ rte_memcpy(async_descs + nr_copy, vq->desc_packed,
+ (*nr_descs - nr_copy) * sizeof(struct vring_packed_desc));
+ }
+
+ vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id, buffer_desc_count, *nr_buffers);
+
+ return 0;
+}
+
+static __rte_always_inline int16_t
+virtio_dev_rx_async_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers,
+ struct vring_packed_desc *async_descs,
+ struct iovec *src_iovec, struct iovec *dst_iovec,
+ struct rte_vhost_iov_iter *src_it, struct rte_vhost_iov_iter *dst_it)
+{
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
+
+ if (unlikely(vhost_enqueue_async_single_packed(dev, vq, pkt, buf_vec, nr_descs, nr_buffers,
+ async_descs, src_iovec, dst_iovec,
+ src_it, dst_it) < 0)) {
+ VHOST_LOG_DATA(DEBUG, "(%d) failed to get enough desc from vring\n", dev->vid);
+ return -1;
+ }
+
+ VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
+ dev->vid, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
+
+ return 0;
+}
+
+static __rte_always_inline void
+dma_error_handler_packed(struct vhost_virtqueue *vq, struct vring_packed_desc *async_descs,
+ uint16_t async_descs_idx, uint16_t slot_idx, uint32_t nr_err,
+ uint32_t *pkt_idx, uint32_t *num_async_pkts, uint32_t *num_done_pkts)
+{
+ uint16_t descs_err = 0;
+ uint16_t buffers_err = 0;
+ struct async_inflight_info *pkts_info = vq->async_pkts_info;
+
+ *num_async_pkts -= nr_err;
+ *pkt_idx -= nr_err;
+ /* calculate the sum of buffers and descs of DMA-error packets. */
+ while (nr_err-- > 0) {
+ descs_err += pkts_info[slot_idx % vq->size].descs;
+ buffers_err += pkts_info[slot_idx % vq->size].nr_buffers;
+ slot_idx--;
+ }
+
+ vq->async_buffer_idx_packed -= buffers_err;
+
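+ /*
+ * Roll back last_avail_idx and restore the original descriptors
+ * from the backup, handling the ring wrap-around case.
+ */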
+ if (vq->last_avail_idx >= descs_err) {
+ vq->last_avail_idx -= descs_err;
+
+ rte_memcpy(&vq->desc_packed[vq->last_avail_idx],
+ &async_descs[async_descs_idx - descs_err],
+ descs_err * sizeof(struct vring_packed_desc));
+ } else {
+ uint16_t nr_copy;
+
+ vq->last_avail_idx = vq->last_avail_idx + vq->size - descs_err;
+ nr_copy = vq->size - vq->last_avail_idx;
+ rte_memcpy(&vq->desc_packed[vq->last_avail_idx],
+ &async_descs[async_descs_idx - descs_err],
+ nr_copy * sizeof(struct vring_packed_desc));
+ descs_err -= nr_copy;
+ rte_memcpy(&vq->desc_packed[0], &async_descs[async_descs_idx - descs_err],
+ descs_err * sizeof(struct vring_packed_desc));
+ vq->avail_wrap_counter ^= 1;
+ }
+
+ *num_done_pkts = *pkt_idx - *num_async_pkts;
+}
+
+static __rte_noinline uint32_t
+virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count,
+ struct rte_mbuf **comp_pkts, uint32_t *comp_count)
+{
+ uint32_t pkt_idx = 0, pkt_burst_idx = 0;
+ uint32_t remained = count;
+ uint16_t async_descs_idx = 0;
+ uint16_t num_buffers;
+ uint16_t num_descs;
+
+ struct rte_vhost_iov_iter *it_pool = vq->it_pool;
+ struct iovec *vec_pool = vq->vec_pool;
+ struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
+ struct iovec *src_iovec = vec_pool;
+ struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+ uint16_t slot_idx = 0;
+ uint16_t segs_await = 0;
+ uint16_t iovec_idx = 0, it_idx = 0;
+ struct async_inflight_info *pkts_info = vq->async_pkts_info;
+ uint32_t n_pkts = 0, pkt_err = 0;
+ uint32_t num_async_pkts = 0, num_done_pkts = 0;
+ struct vring_packed_desc async_descs[vq->size];
+
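+ /*
+ * Try the batch path first; batches with any packet at or above
+ * the async threshold fall back to the single-packet async path.
+ */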
+ do {
+ rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
+ if (remained >= PACKED_BATCH_SIZE) {
+ if (!virtio_dev_rx_async_batch_packed(dev, vq,
+ &pkts[pkt_idx], comp_pkts, &num_done_pkts)) {
+ pkt_idx += PACKED_BATCH_SIZE;
+ remained -= PACKED_BATCH_SIZE;
+ continue;
+ }
+ }
+
+ num_buffers = 0;
+ num_descs = 0;
+ if (unlikely(virtio_dev_rx_async_single_packed(dev, vq, pkts[pkt_idx],
+ &num_descs, &num_buffers,
+ &async_descs[async_descs_idx],
+ &src_iovec[iovec_idx], &dst_iovec[iovec_idx],
+ &it_pool[it_idx], &it_pool[it_idx + 1]) < 0))
+ break;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
+ dev->vid, vq->last_avail_idx,
+ vq->last_avail_idx + num_descs);
+
+ slot_idx = (vq->async_pkts_idx + num_async_pkts) % vq->size;
+ if (it_pool[it_idx].count) {
+ uint16_t from, to;
+
+ async_descs_idx += num_descs;
+ async_fill_desc(&tdes[pkt_burst_idx++],
+ &it_pool[it_idx], &it_pool[it_idx + 1]);
+ pkts_info[slot_idx].descs = num_descs;
+ pkts_info[slot_idx].nr_buffers = num_buffers;
+ pkts_info[slot_idx].mbuf = pkts[pkt_idx];
+ num_async_pkts++;
+ iovec_idx += it_pool[it_idx].nr_segs;
+ segs_await += it_pool[it_idx].nr_segs;
+ it_idx += 2;
+
+ /*
+ * Move this packet's shadow used entries aside so the
+ * DMA-occupied descriptors are only written back once the
+ * transfer completes.
+ */
+ from = vq->shadow_used_idx - num_buffers;
+ to = vq->async_buffer_idx_packed % vq->size;
+ store_dma_desc_info_packed(vq->shadow_used_packed,
+ vq->async_buffers_packed, vq->size, from, to, num_buffers);
+
+ vq->async_buffer_idx_packed += num_buffers;
+ vq->shadow_used_idx -= num_buffers;
+ } else {
+ comp_pkts[num_done_pkts++] = pkts[pkt_idx];
+ }
+
+ pkt_idx++;
+ remained--;
+ vq_inc_last_avail_packed(vq, num_descs);
+
+ /*
+ * Conditions to trigger the async device transfer:
+ * - the number of buffered packets reaches the transfer threshold
+ * - fewer than BUF_VECTOR_MAX unused async iovecs remain
+ */
+ if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
+ ((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
+ n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
+ tdes, 0, pkt_burst_idx);
+ iovec_idx = 0;
+ it_idx = 0;
+ segs_await = 0;
+ vq->async_pkts_inflight_n += n_pkts;
+
+ if (unlikely(n_pkts < pkt_burst_idx)) {
+ /*
+ * Record the number of failed packets here; the actual
+ * error handling is done when the application polls for
+ * completions.
+ */
+ pkt_err = pkt_burst_idx - n_pkts;
+ pkt_burst_idx = 0;
+ break;
+ }
+
+ pkt_burst_idx = 0;
+ }
+ } while (pkt_idx < count);
+
+ if (pkt_burst_idx) {
+ n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ vq->async_pkts_inflight_n += n_pkts;
+
+ if (unlikely(n_pkts < pkt_burst_idx))
+ pkt_err = pkt_burst_idx - n_pkts;
+ }
+
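+ /* Perform any CPU copies that were batched for synchronously handled data. */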
+ do_data_copy_enqueue(dev, vq);
+
+ if (unlikely(pkt_err))
+ dma_error_handler_packed(vq, async_descs, async_descs_idx, slot_idx, pkt_err,
+ &pkt_idx, &num_async_pkts, &num_done_pkts);
+ vq->async_pkts_idx += num_async_pkts;
+ *comp_count = num_done_pkts;
+
+ if (likely(vq->shadow_used_idx)) {
+ vhost_flush_enqueue_shadow_packed(dev, vq);
+ vhost_vring_call_packed(dev, vq);
+ }
+
+ return pkt_idx;
+}
+
+static __rte_always_inline void
+write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
+{
+ uint16_t nr_left = n_descs;
+ uint16_t nr_copy;
+ uint16_t to, from;
+
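+ /*
+ * Copy completed used elements from the async shadow ring to the
+ * used ring, splitting the copy when either ring wraps.
+ */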
+ do {
+ from = vq->last_async_desc_idx_split & (vq->size - 1);
+ nr_copy = nr_left + from <= vq->size ? nr_left : vq->size - from;
+ to = vq->last_used_idx & (vq->size - 1);
+
+ if (to + nr_copy <= vq->size) {
+ rte_memcpy(&vq->used->ring[to], &vq->async_descs_split[from],
+ nr_copy * sizeof(struct vring_used_elem));
+ } else {
+ uint16_t size = vq->size - to;
+
+ rte_memcpy(&vq->used->ring[to], &vq->async_descs_split[from],
+ size * sizeof(struct vring_used_elem));
+ rte_memcpy(&vq->used->ring[0], &vq->async_descs_split[from + size],
+ (nr_copy - size) * sizeof(struct vring_used_elem));
+ }
+
+ vq->last_async_desc_idx_split += nr_copy;
+ vq->last_used_idx += nr_copy;
+ nr_left -= nr_copy;
+ } while (nr_left > 0);
+}
+
+static __rte_always_inline void
+write_back_completed_descs_packed(struct vhost_virtqueue *vq,
+ uint16_t n_buffers)
+{
+ uint16_t nr_left = n_buffers;
+ uint16_t from, to;
+
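+ /*
+ * Write back completed buffers to the descriptor ring, splitting
+ * the update when the async buffer ring wraps.
+ */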
+ do {
+ from = vq->last_async_buffer_idx_packed % vq->size;
+ to = (from + nr_left) % vq->size;
+ if (to > from) {
+ vhost_update_used_packed(vq, vq->async_buffers_packed + from, to - from);
+ vq->last_async_buffer_idx_packed += nr_left;
+ nr_left = 0;
+ } else {
+ vhost_update_used_packed(vq, vq->async_buffers_packed + from,
+ vq->size - from);
+ vq->last_async_buffer_idx_packed += vq->size - from;
+ nr_left -= vq->size - from;
+ }
+ } while (nr_left > 0);
+}
+