/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2020 Intel Corporation
*/
+
+#include <sys/uio.h>
+#ifdef RTE_RAW_IOAT
#include <rte_rawdev.h>
#include <rte_ioat_rawdev.h>
struct dma_for_vhost dma_bind[MAX_VHOST_DEVICE];
+/*
+ * Per-IOAT-device bookkeeping for in-flight async copies.
+ * size_track[] records, per enqueued packet, how many DMA segments it
+ * was split into, so completions (reported in segments) can be folded
+ * back into whole packets. next_read/next_write are ring indices into
+ * size_track[]; last_remain carries segments completed beyond the last
+ * whole packet; ioat_space counts free slots left in the device ring.
+ */
+struct packet_tracker {
+ unsigned short size_track[MAX_ENQUEUED_SIZE];
+ unsigned short next_read;
+ unsigned short next_write;
+ unsigned short last_remain;
+ unsigned short ioat_space;
+};
+
+/* One tracker per possible device, indexed by IOAT rawdev id. */
+struct packet_tracker cb_tracker[MAX_VHOST_DEVICE];
+
int
open_ioat(const char *value)
{
	int ret = 0;
	uint16_t i = 0;
	char *dma_arg[MAX_VHOST_DEVICE];
-	uint8_t args_nr;
+	/* rte_strsplit() returns int and can be negative on error; a signed
+	 * type is needed so the failure check below actually triggers.
+	 */
+	int args_nr;
	while (isblank(*addrs))
		addrs++;
	}
	args_nr = rte_strsplit(substr, strlen(substr),
			dma_arg, MAX_VHOST_DEVICE, ',');
-	do {
+	/* Bail out on an empty/unsplittable option string instead of
+	 * entering the loop with a bogus element count.
+	 */
+	if (args_nr <= 0) {
+		ret = -1;
+		goto out;
+	}
+	while (i < args_nr) {
		char *arg_temp = dma_arg[i];
-		rte_strsplit(arg_temp, strlen(arg_temp), ptrs, 2, '@');
+		uint8_t sub_nr;
+		sub_nr = rte_strsplit(arg_temp, strlen(arg_temp), ptrs, 2, '@');
+		/* Each element must be of the form <txd..>@<dma address>;
+		 * reject anything that does not split into exactly two parts.
+		 */
+		if (sub_nr != 2) {
+			ret = -1;
+			goto out;
+		}
		start = strstr(ptrs[0], "txd");
		if (start == NULL) {
			goto out;
		}
		rte_rawdev_start(dev_id);
-
+		/* Freshly started device: the full ring is available. */
+		cb_tracker[dev_id].ioat_space = IOAT_RING_SIZE;
		dma_info->nr++;
		i++;
-	} while (i < args_nr);
+	}
out:
	free(input);
	return ret;
}
+
+/*
+ * vhost async-channel transfer callback: enqueue the scatter-gather
+ * copies for up to @count descriptors on the IOAT device bound to this
+ * device/virtqueue pair, then ring the doorbell once for the batch.
+ * Returns the number of descriptors whose segments were all enqueued.
+ * opaque_data is not supported: returns -1 (note the uint32_t return
+ * type turns that into UINT32_MAX for the caller).
+ */
+uint32_t
+ioat_transfer_data_cb(int vid, uint16_t queue_id,
+ struct rte_vhost_async_desc *descs,
+ struct rte_vhost_async_status *opaque_data, uint16_t count)
+{
+ uint32_t i_desc;
+ uint16_t dev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id;
+ struct rte_vhost_iov_iter *src = NULL;
+ struct rte_vhost_iov_iter *dst = NULL;
+ unsigned long i_seg;
+ /* assumes MAX_ENQUEUED_SIZE is a power of two — TODO confirm */
+ unsigned short mask = MAX_ENQUEUED_SIZE - 1;
+ unsigned short write = cb_tracker[dev_id].next_write;
+
+ if (!opaque_data) {
+ for (i_desc = 0; i_desc < count; i_desc++) {
+ src = descs[i_desc].src;
+ dst = descs[i_desc].dst;
+ i_seg = 0;
+ /* A packet is enqueued all-or-nothing: stop as soon as the
+ * ring cannot hold every segment of this descriptor.
+ */
+ if (cb_tracker[dev_id].ioat_space < src->nr_segs)
+ break;
+ while (i_seg < src->nr_segs) {
+ /* NOTE(review): rte_ioat_enqueue_copy() can fail (returns 0);
+ * its return value is not checked here — verify the space
+ * check above is sufficient to rule that out.
+ */
+ rte_ioat_enqueue_copy(dev_id,
+ (uintptr_t)(src->iov[i_seg].iov_base)
+ + src->offset,
+ (uintptr_t)(dst->iov[i_seg].iov_base)
+ + dst->offset,
+ src->iov[i_seg].iov_len,
+ 0,
+ 0);
+ i_seg++;
+ }
+ /* record segment count so completions can be re-packetized */
+ write &= mask;
+ cb_tracker[dev_id].size_track[write] = src->nr_segs;
+ cb_tracker[dev_id].ioat_space -= src->nr_segs;
+ write++;
+ }
+ } else {
+ /* Opaque data is not supported */
+ return -1;
+ }
+ /* ring the doorbell */
+ rte_ioat_perform_ops(dev_id);
+ cb_tracker[dev_id].next_write = write;
+ return i_desc;
+}
+
+/*
+ * vhost async-channel completion callback: poll the IOAT device for
+ * finished copy operations (counted in segments) and convert them into
+ * a count of fully-completed packets using the per-packet segment
+ * counts recorded at enqueue time. Leftover segments that do not yet
+ * complete a whole packet are carried over in last_remain.
+ * Returns the number of completed packets, 0 on poll error/no progress,
+ * or -1 (wrapped by the uint32_t return) if opaque_data is passed.
+ */
+uint32_t
+ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
+ struct rte_vhost_async_status *opaque_data,
+ uint16_t max_packets)
+{
+ if (!opaque_data) {
+ uintptr_t dump[255];
+ int n_seg;
+ unsigned short read, write;
+ unsigned short nb_packet = 0;
+ /* assumes MAX_ENQUEUED_SIZE is a power of two — TODO confirm */
+ unsigned short mask = MAX_ENQUEUED_SIZE - 1;
+ unsigned short i;
+
+ uint16_t dev_id = dma_bind[vid].dmas[queue_id * 2
+ + VIRTIO_RXQ].dev_id;
+ /* dump[] receives both src and dst handles; they are discarded. */
+ n_seg = rte_ioat_completed_ops(dev_id, 255, dump, dump);
+ if (n_seg < 0) {
+ RTE_LOG(ERR,
+ VHOST_DATA,
+ "fail to poll completed buf on IOAT device %u",
+ dev_id);
+ return 0;
+ }
+ if (n_seg == 0)
+ return 0;
+
+ /* completed segments free their slots in the device ring */
+ cb_tracker[dev_id].ioat_space += n_seg;
+ n_seg += cb_tracker[dev_id].last_remain;
+
+ read = cb_tracker[dev_id].next_read;
+ write = cb_tracker[dev_id].next_write;
+ for (i = 0; i < max_packets; i++) {
+ read &= mask;
+ if (read == write)
+ break; /* no more packets outstanding */
+ if (n_seg >= cb_tracker[dev_id].size_track[read]) {
+ /* all segments of this packet are done */
+ n_seg -= cb_tracker[dev_id].size_track[read];
+ read++;
+ nb_packet++;
+ } else {
+ break; /* head packet only partially complete */
+ }
+ }
+ cb_tracker[dev_id].next_read = read;
+ cb_tracker[dev_id].last_remain = n_seg;
+ return nb_packet;
+ }
+ /* Opaque data is not supported */
+ return -1;
+}
+
+#endif /* RTE_RAW_IOAT */