{
	struct virtio_net *dev = get_device(vid);
	struct vhost_virtqueue *vq;
-	uint16_t n_pkts_cpl, n_pkts_put = 0, n_descs = 0;
+	uint16_t n_segs_cpl, n_pkts_put = 0, n_descs = 0;
	uint16_t start_idx, pkts_idx, vq_size;
+	uint16_t n_inflight;
	uint64_t *async_pending_info;
	if (!dev)
		return 0;

	vq = dev->virtqueue[queue_id];

	rte_spinlock_lock(&vq->access_lock);
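+	/* snapshot the async state while holding the vq access lock */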
+	n_inflight = vq->async_pkts_inflight_n;
	pkts_idx = vq->async_pkts_idx;
	async_pending_info = vq->async_pending_info;
	vq_size = vq->size;

	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
		vq_size, vq->async_pkts_inflight_n);
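+	/*
+	 * Poll the DMA backend for finished segment copies. Segments that
+	 * completed on an earlier poll but were not consumed yet
+	 * (async_last_seg_n) are added back in, and the poll size is capped
+	 * so the running total never exceeds ASYNC_MAX_POLL_SEG.
+	 */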
-	n_pkts_cpl =
-		vq->async_ops.check_completed_copies(vid, queue_id, 0, count);
+	n_segs_cpl = vq->async_ops.check_completed_copies(vid, queue_id,
+		0, ASYNC_MAX_POLL_SEG - vq->async_last_seg_n) +
+		vq->async_last_seg_n;

	rte_smp_wmb();
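+	/*
+	 * Scan the pending ring in submission order; a packet is retired
+	 * only once all of its segments have completed.
+	 */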
-	while (likely(((start_idx + n_pkts_put) & (vq_size - 1)) != pkts_idx)) {
+	while (likely((n_pkts_put < count) && n_inflight)) {
		uint64_t info = async_pending_info[
			(start_idx + n_pkts_put) & (vq_size - 1)];
		uint64_t n_segs;
		n_pkts_put++;
+		n_inflight--;
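+		/*
+		 * Each pending-info slot packs the packet's used-descriptor
+		 * count in the low bits (ASYNC_PENDING_INFO_N_MSK) and its
+		 * segment count above ASYNC_PENDING_INFO_N_SFT.
+		 */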
		n_descs += info & ASYNC_PENDING_INFO_N_MSK;
		n_segs = info >> ASYNC_PENDING_INFO_N_SFT;

		if (n_segs) {
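+			/*
+			 * If the completed segments cannot cover this whole
+			 * packet, roll it back, note how many segments remain
+			 * outstanding, and stop scanning.
+			 */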
-			if (!n_pkts_cpl || n_pkts_cpl < n_segs) {
+			if (unlikely(n_segs_cpl < n_segs)) {
				n_pkts_put--;
+				n_inflight++;
				n_descs -= info & ASYNC_PENDING_INFO_N_MSK;
-				if (n_pkts_cpl) {
+				if (n_segs_cpl) {
					async_pending_info[
						(start_idx + n_pkts_put) &
						(vq_size - 1)] =
-						((n_segs - n_pkts_cpl) <<
+						((n_segs - n_segs_cpl) <<
						ASYNC_PENDING_INFO_N_SFT) |
						(info & ASYNC_PENDING_INFO_N_MSK);
-					n_pkts_cpl = 0;
+					n_segs_cpl = 0;
				}
				break;
			}
-			n_pkts_cpl -= n_segs;
+			n_segs_cpl -= n_segs;
		}
	}
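+	/*
+	 * Carry surplus completed segments over to the next poll so they
+	 * are not lost when the loop stops at the caller's count limit.
+	 */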
+	vq->async_last_seg_n = n_segs_cpl;
+
	if (n_pkts_put) {
-		vq->async_pkts_inflight_n -= n_pkts_put;
+		vq->async_pkts_inflight_n = n_inflight;
		if (likely(vq->enabled && vq->access_ok)) {
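+			/* make the new used index visible to the guest */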
			__atomic_add_fetch(&vq->used->idx,
				n_descs, __ATOMIC_RELEASE);