int32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
- struct rte_vhost_async_desc *descs,
+ struct rte_vhost_iov_iter *iov_iter,
struct rte_vhost_async_status *opaque_data, uint16_t count)
{
- uint32_t i_desc;
+ uint32_t i_iter;
uint16_t dev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id;
struct rte_vhost_iov_iter *iter = NULL;
unsigned long i_seg;
unsigned short write = cb_tracker[dev_id].next_write;
if (!opaque_data) {
- for (i_desc = 0; i_desc < count; i_desc++) {
- iter = descs[i_desc].iter;
+ for (i_iter = 0; i_iter < count; i_iter++) {
+ iter = iov_iter + i_iter;
i_seg = 0;
if (cb_tracker[dev_id].ioat_space < iter->nr_segs)
break;
/* ring the doorbell */
rte_ioat_perform_ops(dev_id);
cb_tracker[dev_id].next_write = write;
- return i_desc;
+ return i_iter;
}
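
The hunk above only keeps the lines around the renamed variables; the per-segment enqueue loop between the ioat_space check and the doorbell is not shown. As a hedged illustration only (not the patch's own code), a helper along these lines could submit one IOV iterator to the IOAT rawdev, assuming the iovec entries carry src_addr/dst_addr/len as elsewhere in this series:

#include <stdint.h>
#include <rte_ioat_rawdev.h>
#include <rte_vhost_async.h>

/* Illustrative sketch: enqueue every segment of one IOV iterator to an
 * IOAT rawdev instance. Source/destination handles are unused, so 0 is
 * passed. Returns 0 on success, -1 if an enqueue is rejected.
 */
static int
enqueue_iov_iter(uint16_t dev_id, struct rte_vhost_iov_iter *iter)
{
	unsigned long i_seg;

	for (i_seg = 0; i_seg < iter->nr_segs; i_seg++) {
		if (rte_ioat_enqueue_copy(dev_id,
				(uintptr_t)iter->iov[i_seg].src_addr,
				(uintptr_t)iter->iov[i_seg].dst_addr,
				iter->iov[i_seg].len,
				0, 0) <= 0)
			return -1;
	}
	return 0;
}

As in the hunk above, the caller would still ring the doorbell with rte_ioat_perform_ops() once the burst has been enqueued.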
int32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
- struct rte_vhost_async_desc *descs,
+ struct rte_vhost_iov_iter *iov_iter,
struct rte_vhost_async_status *opaque_data, uint16_t count);
static int32_t
ioat_transfer_data_cb(int vid __rte_unused, uint16_t queue_id __rte_unused,
- struct rte_vhost_async_desc *descs __rte_unused,
+ struct rte_vhost_iov_iter *iov_iter __rte_unused,
struct rte_vhost_async_status *opaque_data __rte_unused,
uint16_t count __rte_unused)
{
unsigned long nr_segs;
};
-/**
- * dma transfer descriptor
- */
-struct rte_vhost_async_desc {
- /* memory iov_iter */
- struct rte_vhost_iov_iter *iter;
-};
-
/**
* dma transfer status
*/
* id of vhost device to perform data copies
* @param queue_id
* queue id to perform data copies
- * @param descs
- * an array of DMA transfer memory descriptors
+ * @param iov_iter
+ * an array of IOV iterators
* @param opaque_data
* opaque data pair sending to DMA engine
* @param count
- * number of elements in the "descs" array
+ * number of elements in the "iov_iter" array
* @return
- * number of descs processed, negative value means error
+ * number of IOV iterators processed, negative value means error
*/
int32_t (*transfer_data)(int vid, uint16_t queue_id,
- struct rte_vhost_async_desc *descs,
+ struct rte_vhost_iov_iter *iov_iter,
struct rte_vhost_async_status *opaque_data,
uint16_t count);
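
The documentation above only states the contract of the callback. As a hedged, hypothetical example (not part of this patch), a backend could implement the new signature with plain CPU copies; a real backend would instead offload the segments to a DMA engine and complete them asynchronously:

#include <stdint.h>
#include <string.h>
#include <rte_common.h>
#include <rte_vhost_async.h>

/* Hypothetical synchronous backend: every segment of every iterator is
 * copied immediately, so all "count" iterators are reported as accepted.
 */
static int32_t
sync_transfer_data_cb(int vid __rte_unused, uint16_t queue_id __rte_unused,
		struct rte_vhost_iov_iter *iov_iter,
		struct rte_vhost_async_status *opaque_data, uint16_t count)
{
	uint16_t i_iter;
	unsigned long i_seg;

	if (opaque_data != NULL)
		return -1;	/* opaque data not supported in this sketch */

	for (i_iter = 0; i_iter < count; i_iter++) {
		struct rte_vhost_iov_iter *iter = &iov_iter[i_iter];

		for (i_seg = 0; i_seg < iter->nr_segs; i_seg++)
			memcpy(iter->iov[i_seg].dst_addr,
					iter->iov[i_seg].src_addr,
					iter->iov[i_seg].len);
	}

	return i_iter;
}

The return value tells vhost how many of the "count" iterators were accepted; the virtio_net.c hunks below show the error handling for a negative return.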
/**
async->iovec_idx = 0;
}
-static __rte_always_inline void
-async_fill_descs(struct vhost_async *async, struct rte_vhost_async_desc *descs)
-{
- int i;
-
- for (i = 0; i < async->iter_idx; i++)
- descs[i].iter = async->iov_iter + i;
-}
-
static __rte_always_inline int
async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf *m, struct buf_vector *buf_vec,
uint16_t avail_head;
struct vhost_async *async = vq->async;
- struct rte_vhost_async_desc async_descs[MAX_PKT_BURST];
struct async_inflight_info *pkts_info = async->pkts_info;
uint32_t pkt_err = 0;
int32_t n_xfer;
if (unlikely(pkt_idx == 0))
return 0;
- async_fill_descs(async, async_descs);
-
- n_xfer = async->ops.transfer_data(dev->vid, queue_id, async_descs, 0, pkt_idx);
+ n_xfer = async->ops.transfer_data(dev->vid, queue_id, async->iov_iter, 0, pkt_idx);
if (unlikely(n_xfer < 0)) {
VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
dev->vid, __func__, queue_id);
uint16_t num_descs;
struct vhost_async *async = vq->async;
- struct rte_vhost_async_desc async_descs[MAX_PKT_BURST];
struct async_inflight_info *pkts_info = async->pkts_info;
uint32_t pkt_err = 0;
uint16_t slot_idx = 0;
if (unlikely(pkt_idx == 0))
return 0;
- async_fill_descs(async, async_descs);
-
- n_xfer = async->ops.transfer_data(dev->vid, queue_id, async_descs, 0, pkt_idx);
+ n_xfer = async->ops.transfer_data(dev->vid, queue_id, async->iov_iter, 0, pkt_idx);
if (unlikely(n_xfer < 0)) {
VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
dev->vid, __func__, queue_id);