break;
while (i_seg < iter->nr_segs) {
rte_ioat_enqueue_copy(dev_id,
- (uintptr_t)(iter->iov[i_seg].src_addr)
- + iter->offset,
- (uintptr_t)(iter->iov[i_seg].dst_addr)
- + iter->offset,
+ (uintptr_t)(iter->iov[i_seg].src_addr),
+ (uintptr_t)(iter->iov[i_seg].dst_addr),
iter->iov[i_seg].len,
0,
0);
* iovec iterator
*/
struct rte_vhost_iov_iter {
	/** pointer to the iovec array */
	struct rte_vhost_iovec *iov;
	/** number of iovec in this iterator */
	/* NOTE(review): field line was missing in the extracted hunk; type
	 * inferred from async_fill_iter()'s nr_seg parameter — confirm
	 * against the original header. */
	unsigned long nr_segs;
};
static __rte_always_inline void
-async_fill_iter(struct rte_vhost_iov_iter *it, size_t count,
- struct rte_vhost_iovec *vec, unsigned long nr_seg)
+async_fill_iter(struct rte_vhost_iov_iter *it, struct rte_vhost_iovec *vec, unsigned long nr_seg)
{
- it->offset = 0;
- it->count = count;
-
- if (count) {
- it->iov = vec;
- it->nr_segs = nr_seg;
- } else {
- it->iov = 0;
- it->nr_segs = 0;
- }
+ it->iov = vec;
+ it->nr_segs = nr_seg;
}
static __rte_always_inline void
uint32_t cpy_len, buf_len;
int error = 0;
- uint32_t tlen = 0;
int tvec_idx = 0;
void *hpa;
(void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
mbuf_offset), hpa, (size_t)mapped_len);
- tlen += (uint32_t)mapped_len;
cpy_len -= (uint32_t)mapped_len;
mbuf_avail -= (uint32_t)mapped_len;
mbuf_offset += (uint32_t)mapped_len;
}
}
- async_fill_iter(iter, tlen, iovec, tvec_idx);
+ async_fill_iter(iter, iovec, tvec_idx);
out:
return error;
}