struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
int error = 0;
+ uint64_t mapped_len;
uint32_t tlen = 0;
int tvec_idx = 0;
cpy_len = RTE_MIN(buf_avail, mbuf_avail);
- if (unlikely(cpy_len >= cpy_threshold)) {
- hpa = (void *)(uintptr_t)gpa_to_hpa(dev,
- buf_iova + buf_offset, cpy_len);
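+ /* The guest buffer may span several host-physically contiguous
+ * regions; map and enqueue it chunk by chunk, one iovec per region. */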
+ while (unlikely(cpy_len && cpy_len >= cpy_threshold)) {
+ hpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,
+ buf_iova + buf_offset,
+ cpy_len, &mapped_len);
- if (unlikely(!hpa)) {
- error = -1;
- goto out;
- }
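+ /* Stop when no mapping exists or the contiguous chunk is smaller
+ * than the async threshold; the leftover is copied by the CPU below. */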
+ if (unlikely(!hpa || mapped_len < cpy_threshold))
+ break;
async_fill_vec(src_iovec + tvec_idx,
(void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
- mbuf_offset), cpy_len);
+ mbuf_offset), (size_t)mapped_len);
- async_fill_vec(dst_iovec + tvec_idx, hpa, cpy_len);
+ async_fill_vec(dst_iovec + tvec_idx,
+ hpa, (size_t)mapped_len);
- tlen += cpy_len;
+ tlen += (uint32_t)mapped_len;
+ cpy_len -= (uint32_t)mapped_len;
+ mbuf_avail -= (uint32_t)mapped_len;
+ mbuf_offset += (uint32_t)mapped_len;
+ buf_avail -= (uint32_t)mapped_len;
+ buf_offset += (uint32_t)mapped_len;
tvec_idx++;
- } else {
+ }
+
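+ /* Copy the remaining bytes (now below the async threshold)
+ * synchronously, directly or through the batched-copy array. */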
+ if (likely(cpy_len)) {
if (unlikely(vq->batch_copy_nb_elems >= vq->size)) {
rte_memcpy(
(void *)((uintptr_t)(buf_addr + buf_offset)),
rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
cpy_len);
} else {
batch_copy[vq->batch_copy_nb_elems].dst =
(void *)((uintptr_t)(buf_addr + buf_offset));
batch_copy[vq->batch_copy_nb_elems].src =
rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
vq->batch_copy_nb_elems++;
}
+
+ mbuf_avail -= cpy_len;
+ mbuf_offset += cpy_len;
+ buf_avail -= cpy_len;
+ buf_offset += cpy_len;
}
- mbuf_avail -= cpy_len;
- mbuf_offset += cpy_len;
- buf_avail -= cpy_len;
- buf_offset += cpy_len;
}
out:
{
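/* Poll for finished async copies, retire completed packets and
* expose their descriptors to the guest through the used ring. */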
struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq;
- uint16_t n_pkts_cpl, n_pkts_put = 0, n_descs = 0;
+ uint16_t n_segs_cpl, n_pkts_put = 0, n_descs = 0;
uint16_t start_idx, pkts_idx, vq_size;
+ uint16_t n_inflight;
uint64_t *async_pending_info;
+ if (!dev)
+ return 0;
+
VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
dev->vid, __func__, queue_id);
return 0;
}

vq = dev->virtqueue[queue_id];
rte_spinlock_lock(&vq->access_lock);
+ n_inflight = vq->async_pkts_inflight_n;
pkts_idx = vq->async_pkts_idx;
async_pending_info = vq->async_pending_info;
vq_size = vq->size;
start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
vq_size, vq->async_pkts_inflight_n);
- n_pkts_cpl =
- vq->async_ops.check_completed_copies(vid, queue_id, 0, count);
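+ /* Poll at most ASYNC_MAX_POLL_SEG segments per call, counting the
+ * completed segments carried over from the previous poll. */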
+ n_segs_cpl = vq->async_ops.check_completed_copies(vid, queue_id,
+ 0, ASYNC_MAX_POLL_SEG - vq->async_last_seg_n) +
+ vq->async_last_seg_n;
rte_smp_wmb();
- while (likely(((start_idx + n_pkts_put) & (vq_size - 1)) != pkts_idx)) {
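+ /* Retire packets in submission order; stop at the first packet
+ * whose copied segments have not all completed yet. */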
+ while (likely((n_pkts_put < count) && n_inflight)) {
uint64_t info = async_pending_info[
(start_idx + n_pkts_put) & (vq_size - 1)];
uint64_t n_segs;
n_pkts_put++;
+ n_inflight--;
n_descs += info & ASYNC_PENDING_INFO_N_MSK;
n_segs = info >> ASYNC_PENDING_INFO_N_SFT;
if (n_segs) {
- if (!n_pkts_cpl || n_pkts_cpl < n_segs) {
+ if (unlikely(n_segs_cpl < n_segs)) {
n_pkts_put--;
+ n_inflight++;
n_descs -= info & ASYNC_PENDING_INFO_N_MSK;
- if (n_pkts_cpl) {
+ if (n_segs_cpl) {
async_pending_info[
(start_idx + n_pkts_put) &
(vq_size - 1)] =
- ((n_segs - n_pkts_cpl) <<
+ ((n_segs - n_segs_cpl) <<
ASYNC_PENDING_INFO_N_SFT) |
(info & ASYNC_PENDING_INFO_N_MSK);
- n_pkts_cpl = 0;
+ n_segs_cpl = 0;
}
break;
}
- n_pkts_cpl -= n_segs;
+ n_segs_cpl -= n_segs;
}
}
- if (n_pkts_put) {
- vq->async_pkts_inflight_n -= n_pkts_put;
- __atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
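+ /* Carry completed-but-unconsumed segments over to the next poll. */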
+ vq->async_last_seg_n = n_segs_cpl;
- vhost_vring_call_split(dev, vq);
+ if (n_pkts_put) {
+ vq->async_pkts_inflight_n = n_inflight;
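+ /* Only update the used ring and kick the guest while the ring is
+ * enabled and its addresses are still accessible. */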
+ if (likely(vq->enabled && vq->access_ok)) {
+ __atomic_add_fetch(&vq->used->idx,
+ n_descs, __ATOMIC_RELEASE);
+ vhost_vring_call_split(dev, vq);
+ }
}
if (start_idx + n_pkts_put <= vq_size) {