unsigned short next_read;
unsigned short next_write;
unsigned short last_remain;
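+ /* free slots left in the device's IOAT ring */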
+ unsigned short ioat_space;
};
struct packet_tracker cb_tracker[MAX_VHOST_DEVICE];
goto out;
}
rte_rawdev_start(dev_id);
-
+ cb_tracker[dev_id].ioat_space = IOAT_RING_SIZE;
dma_info->nr++;
i++;
}
src = descs[i_desc].src;
dst = descs[i_desc].dst;
i_seg = 0;
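+ /* stop enqueuing if the ring cannot hold every segment of this packet */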
+ if (cb_tracker[dev_id].ioat_space < src->nr_segs)
+ break;
while (i_seg < src->nr_segs) {
- /*
- * TODO: Assuming that the ring space of the
- * IOAT device is large enough, so there is no
- * error here, and the actual error handling
- * will be added later.
- */
rte_ioat_enqueue_copy(dev_id,
(uintptr_t)(src->iov[i_seg].iov_base)
+ src->offset,
(uintptr_t)(dst->iov[i_seg].iov_base)
+ dst->offset,
src->iov[i_seg].iov_len,
0,
0);
i_seg++;
}
write &= mask;
- cb_tracker[dev_id].size_track[write] = i_seg;
+ cb_tracker[dev_id].size_track[write] = src->nr_segs;
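+ /* the enqueued segments hold ring slots until their copies complete */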
+ cb_tracker[dev_id].ioat_space -= src->nr_segs;
write++;
}
} else {
{
if (!opaque_data) {
uintptr_t dump[255];
- unsigned short n_seg;
+ int n_seg;
unsigned short read, write;
unsigned short nb_packet = 0;
unsigned short mask = MAX_ENQUEUED_SIZE - 1;
unsigned short i;
+
int dev_id = dma_bind[vid].dmas[queue_id * 2
+ VIRTIO_RXQ].dev_id;
n_seg = rte_ioat_completed_ops(dev_id, 255, dump, dump);
- n_seg += cb_tracker[dev_id].last_remain;
- if (!n_seg)
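+ /* rte_ioat_completed_ops() returns a negative value on failure */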
+ if (n_seg <= 0)
return 0;
+
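+ /* completed copies hand their ring slots back */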
+ cb_tracker[dev_id].ioat_space += n_seg;
+ n_seg += cb_tracker[dev_id].last_remain;
+
read = cb_tracker[dev_id].next_read;
write = cb_tracker[dev_id].next_write;
for (i = 0; i < max_packets; i++) {