+out:
+ async_fill_iter(src_it, tlen, src_iovec, tvec_idx);
+ async_fill_iter(dst_it, tlen, dst_iovec, tvec_idx);
+
+ return error;
+}
+
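+ /*
+ * Reserve enough buffers from the packed ring to hold one mbuf (several
+ * buffers may be chained when mergeable RX buffers are negotiated), copy
+ * the packet into them and record the consumed buffers in the shadow
+ * used ring. Returns 0 on success, -1 if the ring cannot hold the packet.
+ */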
+static __rte_always_inline int
+vhost_enqueue_single_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ struct rte_mbuf *pkt,
+ struct buf_vector *buf_vec,
+ uint16_t *nr_descs)
+{
+ uint16_t nr_vec = 0;
+ uint16_t avail_idx = vq->last_avail_idx;
+ uint16_t max_tries, tries = 0;
+ uint16_t buf_id = 0;
+ uint32_t len = 0;
+ uint16_t desc_count;
+ uint32_t size = pkt->pkt_len + dev->vhost_hlen;
+ uint16_t num_buffers = 0;
+ uint32_t buffer_len[vq->size];
+ uint16_t buffer_buf_id[vq->size];
+ uint16_t buffer_desc_count[vq->size];
+
+ if (rxvq_is_mergeable(dev))
+ max_tries = vq->size - 1;
+ else
+ max_tries = 1;
+
+ while (size > 0) {
+ /*
+ * If we have tried all available ring items and still
+ * cannot get enough buffers, something abnormal has
+ * happened.
+ */
+ if (unlikely(++tries > max_tries))
+ return -1;
+
+ if (unlikely(fill_vec_buf_packed(dev, vq,
+ avail_idx, &desc_count,
+ buf_vec, &nr_vec,
+ &buf_id, &len,
+ VHOST_ACCESS_RW) < 0))
+ return -1;
+
+ len = RTE_MIN(len, size);
+ size -= len;
+
+ buffer_len[num_buffers] = len;
+ buffer_buf_id[num_buffers] = buf_id;
+ buffer_desc_count[num_buffers] = desc_count;
+ num_buffers += 1;
+
+ *nr_descs += desc_count;
+ avail_idx += desc_count;
+ if (avail_idx >= vq->size)
+ avail_idx -= vq->size;
+ }
+
+ if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
+ return -1;
+
+ vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
+ buffer_desc_count, num_buffers);
+
+ return 0;
+}
+
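+ /*
+ * Enqueue a burst of packets into a split virtqueue: reserve buffers per
+ * packet, copy the mbufs in (deferring small copies to the batched copy
+ * list flushed by do_data_copy_enqueue()), then flush the shadow used
+ * ring and notify the guest once at the end.
+ */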
+static __rte_noinline uint32_t
+virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mbuf **pkts, uint32_t count)
+{
+ uint32_t pkt_idx = 0;
+ uint16_t num_buffers;
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
+ uint16_t avail_head;
+
+ /*
+ * The ordering between avail index and
+ * desc reads needs to be enforced.
+ */
+ avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
+
+ rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
+
+ for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
+ uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
+ uint16_t nr_vec = 0;
+
+ if (unlikely(reserve_avail_buf_split(dev, vq,
+ pkt_len, buf_vec, &num_buffers,
+ avail_head, &nr_vec) < 0)) {
+ VHOST_LOG_DATA(DEBUG,
+ "(%d) failed to get enough desc from vring\n",
+ dev->vid);
+ vq->shadow_used_idx -= num_buffers;
+ break;
+ }
+
+ VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
+ dev->vid, vq->last_avail_idx,
+ vq->last_avail_idx + num_buffers);
+
+ if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
+ buf_vec, nr_vec,
+ num_buffers) < 0) {
+ vq->shadow_used_idx -= num_buffers;
+ break;
+ }
+
+ vq->last_avail_idx += num_buffers;
+ }
+
+ do_data_copy_enqueue(dev, vq);
+
+ if (likely(vq->shadow_used_idx)) {
+ flush_shadow_used_ring_split(dev, vq);
+ vhost_vring_call_split(dev, vq);
+ }
+
+ return pkt_idx;
+}
+
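+ /*
+ * Fast path enqueuing PACKED_BATCH_SIZE single-segment mbufs at once.
+ * Returns -1 so the caller falls back to the single-packet path when the
+ * avail index is not batch-aligned, the batch would cross the ring end,
+ * any descriptor is unavailable or too small, or any mbuf is chained.
+ */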
+static __rte_always_inline int
+virtio_dev_rx_batch_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ struct rte_mbuf **pkts)
+{
+ bool wrap_counter = vq->avail_wrap_counter;
+ struct vring_packed_desc *descs = vq->desc_packed;
+ uint16_t avail_idx = vq->last_avail_idx;
+ uint64_t desc_addrs[PACKED_BATCH_SIZE];
+ struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
+ uint32_t buf_offset = dev->vhost_hlen;
+ uint64_t lens[PACKED_BATCH_SIZE];
+ uint16_t ids[PACKED_BATCH_SIZE];
+ uint16_t i;
+
+ if (unlikely(avail_idx & PACKED_BATCH_MASK))
+ return -1;
+
+ if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
+ return -1;
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ if (unlikely(pkts[i]->next != NULL))
+ return -1;
+ if (unlikely(!desc_is_avail(&descs[avail_idx + i],
+ wrap_counter)))
+ return -1;
+ }
+
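+ /* order the availability checks above before reading desc len/addr below */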
+ rte_smp_rmb();
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ lens[i] = descs[avail_idx + i].len;
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
+ return -1;
+ }
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ desc_addrs[i] = vhost_iova_to_vva(dev, vq,
+ descs[avail_idx + i].addr,
+ &lens[i],
+ VHOST_ACCESS_RW);
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ if (unlikely(!desc_addrs[i]))
+ return -1;
+ if (unlikely(lens[i] != descs[avail_idx + i].len))
+ return -1;
+ }
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
+ hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
+ (uintptr_t)desc_addrs[i];
+ lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
+ }
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
+
+ vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
+ rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
+ pkts[i]->pkt_len);
+ }
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
+ lens[i]);
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ ids[i] = descs[avail_idx + i].id;
+
+ vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
+
+ return 0;
+}
+
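+ /*
+ * Slow path: enqueue one mbuf into the packed ring, spanning as many
+ * descriptors as needed, and advance last_avail_idx on success.
+ */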
+static __rte_always_inline int16_t
+virtio_dev_rx_single_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ struct rte_mbuf *pkt)
+{
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
+ uint16_t nr_descs = 0;
+
+ rte_smp_rmb();
+ if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
+ &nr_descs) < 0)) {
+ VHOST_LOG_DATA(DEBUG,
+ "(%d) failed to get enough desc from vring\n",
+ dev->vid);
+ return -1;
+ }
+
+ VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
+ dev->vid, vq->last_avail_idx,
+ vq->last_avail_idx + nr_descs);
+
+ vq_inc_last_avail_packed(vq, nr_descs);
+
+ return 0;
+}
+
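+ /*
+ * Packed ring enqueue loop: try the batched fast path first, fall back to
+ * the single-packet path, then flush the shadow used entries and kick the
+ * guest if anything was enqueued.
+ */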
+static __rte_noinline uint32_t
+virtio_dev_rx_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *__rte_restrict vq,
+ struct rte_mbuf **__rte_restrict pkts,
+ uint32_t count)
+{
+ uint32_t pkt_idx = 0;
+ uint32_t remained = count;
+
+ do {
+ rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
+
+ if (remained >= PACKED_BATCH_SIZE) {
+ if (!virtio_dev_rx_batch_packed(dev, vq,
+ &pkts[pkt_idx])) {
+ pkt_idx += PACKED_BATCH_SIZE;
+ remained -= PACKED_BATCH_SIZE;
+ continue;
+ }
+ }
+
+ if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
+ break;
+ pkt_idx++;
+ remained--;
+
+ } while (pkt_idx < count);
+
+ if (vq->shadow_used_idx) {
+ do_data_copy_enqueue(dev, vq);
+ vhost_flush_enqueue_shadow_packed(dev, vq);
+ }
+
+ if (pkt_idx)
+ vhost_vring_call_packed(dev, vq);
+
+ return pkt_idx;
+}
+
+static __rte_always_inline uint32_t
+virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
+{
+ struct vhost_virtqueue *vq;
+ uint32_t nb_tx = 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ if (unlikely(vq->enabled == 0))
+ goto out_access_unlock;
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_lock(vq);
+
+ if (unlikely(vq->access_ok == 0))
+ if (unlikely(vring_translate(dev, vq) < 0))
+ goto out;
+
+ count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
+ if (count == 0)
+ goto out;
+
+ if (vq_is_packed(dev))
+ nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
+ else
+ nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
+
+out:
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_unlock(vq);
+
+out_access_unlock:
+ rte_spinlock_unlock(&vq->access_lock);
+
+ return nb_tx;
+}
+
+uint16_t
+rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
+ struct rte_mbuf **__rte_restrict pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return 0;
+
+ if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: built-in vhost net backend is disabled.\n",
+ dev->vid, __func__);
+ return 0;
+ }
+
+ return virtio_dev_rx(dev, queue_id, pkts, count);
+}
+
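+ /*
+ * Given the slot where the next submitted packet will be stored and the
+ * number of packets still in flight, return the slot of the oldest
+ * in-flight packet in the power-of-two sized pending array.
+ */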
+static __rte_always_inline uint16_t
+virtio_dev_rx_async_get_info_idx(uint16_t pkts_idx,
+ uint16_t vq_size, uint16_t n_inflight)
+{
+ return pkts_idx > n_inflight ? (pkts_idx - n_inflight) :
+ (vq_size - n_inflight + pkts_idx) & (vq_size - 1);
+}
+
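+ /*
+ * Error unwinding for a failed async submission: drain every segment still
+ * in flight through the completion callback, then roll last_avail_idx and
+ * shadow_used_idx back to the values captured when the submission started.
+ */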
+static __rte_always_inline void
+virtio_dev_rx_async_submit_split_err(struct virtio_net *dev,
+ struct vhost_virtqueue *vq, uint16_t queue_id,
+ uint16_t last_idx, uint16_t shadow_idx)
+{
+ uint16_t start_idx, pkts_idx, vq_size;
+ uint64_t *async_pending_info;
+
+ pkts_idx = vq->async_pkts_idx;
+ async_pending_info = vq->async_pending_info;
+ vq_size = vq->size;
+ start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
+ vq_size, vq->async_pkts_inflight_n);
+
+ while (likely((start_idx & (vq_size - 1)) != pkts_idx)) {
+ uint64_t n_seg =
+ async_pending_info[(start_idx) & (vq_size - 1)] >>
+ ASYNC_PENDING_INFO_N_SFT;
+
+ while (n_seg)
+ n_seg -= vq->async_ops.check_completed_copies(dev->vid,
+ queue_id, 0, 1);
+
+ /* this slot is fully drained; move on to the next pending one */
+ start_idx++;
+ }
+
+ vq->async_pkts_inflight_n = 0;
+ vq->batch_copy_nb_elems = 0;
+
+ vq->shadow_used_idx = shadow_idx;
+ vq->last_avail_idx = last_idx;
+}
+
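+ /*
+ * Async enqueue for split rings. Buffers are reserved as in the sync path;
+ * async_mbuf_to_desc() builds src/dst iovec iterators for the copies,
+ * which are handed to the registered channel in bursts through the
+ * transfer_data callback. Packets that produced no iovec segments are
+ * recorded with a zero segment count and counted in flight right away.
+ * Each async_pending_info entry packs the used descriptor count in the
+ * low bits and the DMA segment count above ASYNC_PENDING_INFO_N_SFT, so
+ * the completion poll can credit the used ring later. On a failed transfer
+ * the whole submission is rolled back via
+ * virtio_dev_rx_async_submit_split_err().
+ */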
+static __rte_noinline uint32_t
+virtio_dev_rx_async_submit_split(struct virtio_net *dev,
+ struct vhost_virtqueue *vq, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
+{
+ uint32_t pkt_idx = 0, pkt_burst_idx = 0;
+ uint16_t num_buffers;
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
+ uint16_t avail_head, last_idx, shadow_idx;
+
+ struct rte_vhost_iov_iter *it_pool = vq->it_pool;
+ struct iovec *vec_pool = vq->vec_pool;
+ struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
+ struct iovec *src_iovec = vec_pool;
+ struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+ struct rte_vhost_iov_iter *src_it = it_pool;
+ struct rte_vhost_iov_iter *dst_it = it_pool + 1;
+ uint16_t n_free_slot, slot_idx;
+ int n_pkts = 0;
+
+ avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
+ last_idx = vq->last_avail_idx;
+ shadow_idx = vq->shadow_used_idx;
+
+ /*
+ * The ordering between avail index and
+ * desc reads needs to be enforced.
+ */
+ rte_smp_rmb();
+
+ rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
+
+ for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
+ uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
+ uint16_t nr_vec = 0;
+
+ if (unlikely(reserve_avail_buf_split(dev, vq,
+ pkt_len, buf_vec, &num_buffers,
+ avail_head, &nr_vec) < 0)) {
+ VHOST_LOG_DATA(DEBUG,
+ "(%d) failed to get enough desc from vring\n",
+ dev->vid);
+ vq->shadow_used_idx -= num_buffers;
+ break;
+ }
+
+ VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
+ dev->vid, vq->last_avail_idx,
+ vq->last_avail_idx + num_buffers);
+
+ if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx],
+ buf_vec, nr_vec, num_buffers,
+ src_iovec, dst_iovec, src_it, dst_it) < 0) {
+ vq->shadow_used_idx -= num_buffers;
+ break;
+ }
+
+ slot_idx = (vq->async_pkts_idx + pkt_idx) & (vq->size - 1);
+ if (src_it->count) {
+ async_fill_desc(&tdes[pkt_burst_idx], src_it, dst_it);
+ pkt_burst_idx++;
+ vq->async_pending_info[slot_idx] =
+ num_buffers |
+ ((uint64_t)src_it->nr_segs << ASYNC_PENDING_INFO_N_SFT);
+ src_iovec += src_it->nr_segs;
+ dst_iovec += dst_it->nr_segs;
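+ /*
+ * src/dst iterators are laid out alternately in it_pool, hence the
+ * stride of 2 for the next packet.
+ */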
+ src_it += 2;
+ dst_it += 2;
+ } else {
+ vq->async_pending_info[slot_idx] = num_buffers;
+ vq->async_pkts_inflight_n++;
+ }
+
+ vq->last_avail_idx += num_buffers;
+
+ if (pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
+ (pkt_idx == count - 1 && pkt_burst_idx)) {
+ n_pkts = vq->async_ops.transfer_data(dev->vid,
+ queue_id, tdes, 0, pkt_burst_idx);
+ src_iovec = vec_pool;
+ dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+ src_it = it_pool;
+ dst_it = it_pool + 1;
+
+ if (unlikely(n_pkts < (int)pkt_burst_idx)) {
+ vq->async_pkts_inflight_n +=
+ n_pkts > 0 ? n_pkts : 0;
+ virtio_dev_rx_async_submit_split_err(dev,
+ vq, queue_id, last_idx, shadow_idx);
+ return 0;
+ }
+
+ pkt_burst_idx = 0;
+ vq->async_pkts_inflight_n += n_pkts;
+ }
+ }
+
+ if (pkt_burst_idx) {
+ n_pkts = vq->async_ops.transfer_data(dev->vid,
+ queue_id, tdes, 0, pkt_burst_idx);
+ if (unlikely(n_pkts < (int)pkt_burst_idx)) {
+ vq->async_pkts_inflight_n += n_pkts > 0 ? n_pkts : 0;
+ virtio_dev_rx_async_submit_split_err(dev, vq, queue_id,
+ last_idx, shadow_idx);
+ return 0;
+ }
+
+ vq->async_pkts_inflight_n += n_pkts;
+ }
+
+ do_data_copy_enqueue(dev, vq);
+
+ n_free_slot = vq->size - vq->async_pkts_idx;
+ if (n_free_slot > pkt_idx) {
+ rte_memcpy(&vq->async_pkts_pending[vq->async_pkts_idx],
+ pkts, pkt_idx * sizeof(uintptr_t));
+ vq->async_pkts_idx += pkt_idx;
+ } else {
+ rte_memcpy(&vq->async_pkts_pending[vq->async_pkts_idx],
+ pkts, n_free_slot * sizeof(uintptr_t));
+ rte_memcpy(&vq->async_pkts_pending[0],
+ &pkts[n_free_slot],
+ (pkt_idx - n_free_slot) * sizeof(uintptr_t));
+ vq->async_pkts_idx = pkt_idx - n_free_slot;
+ }
+
+ if (likely(vq->shadow_used_idx))
+ async_flush_shadow_used_ring_split(dev, vq);
+
+ return pkt_idx;
+}
+
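+ /*
+ * Poll the registered async channel for completed copy segments, retire
+ * packets whose segments have all completed, credit their descriptors to
+ * the used ring and hand the corresponding mbufs back to the caller.
+ * A partially completed packet stays pending; the segments already
+ * completed are carried over in async_last_seg_n.
+ */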
+uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_segs_cpl, n_pkts_put = 0, n_descs = 0;
+ uint16_t start_idx, pkts_idx, vq_size;
+ uint16_t n_inflight;
+ uint64_t *async_pending_info;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ n_inflight = vq->async_pkts_inflight_n;
+ pkts_idx = vq->async_pkts_idx;
+ async_pending_info = vq->async_pending_info;
+ vq_size = vq->size;
+ start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
+ vq_size, vq->async_pkts_inflight_n);
+
+ n_segs_cpl = vq->async_ops.check_completed_copies(vid, queue_id,
+ 0, ASYNC_MAX_POLL_SEG - vq->async_last_seg_n) +
+ vq->async_last_seg_n;
+
+ rte_smp_wmb();
+
+ while (likely((n_pkts_put < count) && n_inflight)) {
+ uint64_t info = async_pending_info[
+ (start_idx + n_pkts_put) & (vq_size - 1)];
+ uint64_t n_segs;
+ n_pkts_put++;
+ n_inflight--;
+ n_descs += info & ASYNC_PENDING_INFO_N_MSK;
+ n_segs = info >> ASYNC_PENDING_INFO_N_SFT;
+
+ if (n_segs) {
+ if (unlikely(n_segs_cpl < n_segs)) {
+ n_pkts_put--;
+ n_inflight++;
+ n_descs -= info & ASYNC_PENDING_INFO_N_MSK;
+ if (n_segs_cpl) {
+ async_pending_info[
+ (start_idx + n_pkts_put) &
+ (vq_size - 1)] =
+ ((n_segs - n_segs_cpl) <<
+ ASYNC_PENDING_INFO_N_SFT) |
+ (info & ASYNC_PENDING_INFO_N_MSK);
+ n_segs_cpl = 0;
+ }
+ break;
+ }
+ n_segs_cpl -= n_segs;
+ }
+ }
+
+ vq->async_last_seg_n = n_segs_cpl;
+
+ if (n_pkts_put) {
+ vq->async_pkts_inflight_n = n_inflight;
+ if (likely(vq->enabled && vq->access_ok)) {
+ __atomic_add_fetch(&vq->used->idx,
+ n_descs, __ATOMIC_RELEASE);
+ vhost_vring_call_split(dev, vq);
+ }
+ }
+
+ if (start_idx + n_pkts_put <= vq_size) {
+ rte_memcpy(pkts, &vq->async_pkts_pending[start_idx],
+ n_pkts_put * sizeof(uintptr_t));
+ } else {
+ rte_memcpy(pkts, &vq->async_pkts_pending[start_idx],
+ (vq_size - start_idx) * sizeof(uintptr_t));
+ rte_memcpy(&pkts[vq_size - start_idx], vq->async_pkts_pending,
+ (n_pkts_put - vq_size + start_idx) * sizeof(uintptr_t));
+ }
+
+ rte_spinlock_unlock(&vq->access_lock);
+
+ return n_pkts_put;
+}
+
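+ /*
+ * Async counterpart of virtio_dev_rx(): same locking and checks, but the
+ * copies are submitted through the registered async channel. When no
+ * channel is registered for this queue, fall back to the synchronous
+ * rte_vhost_enqueue_burst() path.
+ */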
+static __rte_always_inline uint32_t
+virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
+{
+ struct vhost_virtqueue *vq;
+ uint32_t nb_tx = 0;
+ /* fall back to the sync enqueue path when async is unusable */
+ bool fallback = false;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ if (unlikely(vq->enabled == 0))
+ goto out_access_unlock;
+
+ if (unlikely(!vq->async_registered)) {
+ fallback = true;
+ goto out_access_unlock;
+ }
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_lock(vq);
+
+ if (unlikely(vq->access_ok == 0))
+ if (unlikely(vring_translate(dev, vq) < 0))
+ goto out;
+
+ count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
+ if (count == 0)
+ goto out;
+
+ /* TODO: packed queue not implemented */
+ if (vq_is_packed(dev))
+ nb_tx = 0;
+ else
+ nb_tx = virtio_dev_rx_async_submit_split(dev,
+ vq, queue_id, pkts, count);
+
+out:
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_unlock(vq);
+
+out_access_unlock:
+ rte_spinlock_unlock(&vq->access_lock);
+
+ if (fallback)
+ return rte_vhost_enqueue_burst(dev->vid, queue_id, pkts, count);
+
+ return nb_tx;
+}
+
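+ /*
+ * Illustrative call pattern only; it assumes the application has already
+ * registered an async copy channel for this queue (e.g. with
+ * rte_vhost_async_channel_register()) and keeps ownership of the enqueued
+ * mbufs until the completion poll hands them back:
+ *
+ * n_enq = rte_vhost_submit_enqueue_burst(vid, queue_id, pkts, nb_pkts);
+ * ... later, reclaim mbufs whose copies have finished ...
+ * n_cpl = rte_vhost_poll_enqueue_completed(vid, queue_id, done_pkts,
+ * MAX_PKT_BURST);
+ * rte_pktmbuf_free_bulk(done_pkts, n_cpl);
+ */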
+uint16_t
+rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return 0;
+
+ if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: built-in vhost net backend is disabled.\n",
+ dev->vid, __func__);
+ return 0;
+ }
+
+ return virtio_dev_rx_async_submit(dev, queue_id, pkts, count);