/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2020 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include <rte_rawdev.h>
#include <rte_ioat_rawdev.h>
#include <rte_pci.h>
#include <rte_string_fns.h>

/* "ioat.h"/"main.h" provide MAX_VHOST_DEVICE, MAX_ENQUEUED_SIZE,
 * IOAT_RING_SIZE and the vhost async callback types used below.
 */
#include "ioat.h"
#include "main.h"

struct dma_for_vhost dma_bind[MAX_VHOST_DEVICE];
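
/*
 * Book-keeping for in-flight copies, one tracker per IOAT rawdev: the
 * device completes individual copies (segments), while vhost wants results
 * per packet, so size_track[] records how many segments each enqueued
 * packet used and next_read/next_write/last_remain walk that ring when
 * completions are polled.
 */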
struct packet_tracker {
	unsigned short size_track[MAX_ENQUEUED_SIZE];
	unsigned short next_read;
	unsigned short next_write;
	unsigned short last_remain;
};

struct packet_tracker cb_tracker[MAX_VHOST_DEVICE];
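
/*
 * Parse the dmas argument: a bracketed, comma-separated list of
 * "txdN@<PCI BDF>" entries (e.g. "[txd0@00:04.0,txd1@00:04.1]"); each entry
 * binds one IOAT rawdev to the enqueue path of vhost device N, and the
 * rawdev is then configured and started.
 */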
int
open_ioat(const char *value)
{
	struct dma_for_vhost *dma_info = dma_bind;
	char *input = strndup(value, strlen(value) + 1);
	char *addrs = input;
	char *ptrs[2];
	char *start, *end, *substr;
	int64_t vid, vring_id;
	struct rte_ioat_rawdev_config config;
	struct rte_rawdev_info info = { .dev_private = &config };
	char name[32];
	int64_t dev_id;
	int args_nr, ret = 0;
	uint16_t i = 0;
	char *dma_arg[MAX_VHOST_DEVICE];

	if (input == NULL)
		return -1;
	while (isblank(*addrs))
		addrs++;
	if (*addrs == '\0')
		goto err;

	/* process DMA devices within the bracket. */
	addrs++;
	substr = strtok(addrs, ";]");
	if (!substr)
		goto err;
	args_nr = rte_strsplit(substr, strlen(substr),
			dma_arg, MAX_VHOST_DEVICE, ',');
	do {
		char *arg_temp = dma_arg[i];
		rte_strsplit(arg_temp, strlen(arg_temp), ptrs, 2, '@');

		/* "txdN": N is the vhost device served by this DMA channel */
		start = strstr(ptrs[0], "txd");
		if (start == NULL)
			goto err;
		start += 3;
		vid = strtol(start, &end, 0);
		if (end == start)
			goto err;

		vring_id = 0 + VIRTIO_RXQ;
		if (rte_pci_addr_parse(ptrs[1],
				&(dma_info + vid)->dmas[vring_id].addr) < 0)
			goto err;
		rte_pci_device_name(&(dma_info + vid)->dmas[vring_id].addr,
				name, sizeof(name));
		dev_id = rte_rawdev_get_dev_id(name);
		if (dev_id == (uint16_t)(-ENODEV) ||
				dev_id == (uint16_t)(-EINVAL))
			goto err;
		if (rte_rawdev_info_get(dev_id, &info, sizeof(config)) < 0 ||
				strstr(info.driver_name, "ioat") == NULL)
			goto err;

		(dma_info + vid)->dmas[vring_id].dev_id = dev_id;
		(dma_info + vid)->dmas[vring_id].is_valid = true;
		config.ring_size = IOAT_RING_SIZE;
		config.hdls_disable = true;
		if (rte_rawdev_configure(dev_id, &info, sizeof(config)) < 0)
			goto err;
		rte_rawdev_start(dev_id);
		i++;
	} while (i < args_nr);

	goto out;
err:
	ret = -1;
out:
	free(input);
	return ret;
}
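
/*
 * Async vhost "transfer_data" callback: enqueue one IOAT copy per IOV
 * segment of each packet, record the per-packet segment count in the
 * tracker, then ring the doorbell once for the whole burst.
 */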
uint32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
		struct rte_vhost_async_desc *descs,
		struct rte_vhost_async_status *opaque_data, uint16_t count)
{
	uint32_t i_desc;
	unsigned long i_seg;
	int dev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id;
	struct rte_vhost_iov_iter *src = NULL;
	struct rte_vhost_iov_iter *dst = NULL;
	unsigned short mask = MAX_ENQUEUED_SIZE - 1;
	unsigned short write = cb_tracker[dev_id].next_write;

	if (opaque_data != NULL) {
		/* Opaque data is not supported */
		return -1;
	}

	for (i_desc = 0; i_desc < count; i_desc++) {
		src = descs[i_desc].src;
		dst = descs[i_desc].dst;
		i_seg = 0;
		while (i_seg < src->nr_segs) {
			/*
			 * TODO: this assumes the IOAT ring always has room;
			 * error handling for a full ring is still to be added.
			 */
			rte_ioat_enqueue_copy(dev_id,
				(uintptr_t)(src->iov[i_seg].iov_base)
					+ src->offset,
				(uintptr_t)(dst->iov[i_seg].iov_base)
					+ dst->offset,
				src->iov[i_seg].iov_len,
				0, 0);
			i_seg++;
		}
		write &= mask;
		cb_tracker[dev_id].size_track[write] = i_seg;
		write++;
	}

	/* ring the doorbell */
	rte_ioat_perform_ops(dev_id);
	cb_tracker[dev_id].next_write = write;
	return i_desc;
}
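
/*
 * Async vhost "check_completed_copies" callback: poll the IOAT rawdev for
 * completed copies (segments), add any segments left over from the previous
 * poll, and convert them back into fully completed packets using the
 * per-packet segment counts recorded at enqueue time.
 */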
uint32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
		struct rte_vhost_async_status *opaque_data,
		uint16_t max_packets)
{
	uintptr_t dump[255];
	int ret;
	unsigned short n_seg, read, write, i;
	unsigned short nb_packet = 0;
	unsigned short mask = MAX_ENQUEUED_SIZE - 1;
	int dev_id = dma_bind[vid].dmas[queue_id * 2
			+ VIRTIO_RXQ].dev_id;

	if (opaque_data != NULL) {
		/* Opaque data is not supported */
		return -1;
	}

	/* handle tracking is disabled, so dump[] is only scratch space here */
	ret = rte_ioat_completed_ops(dev_id, 255, dump, dump);
	if (ret <= 0)
		return 0;

	n_seg = ret + cb_tracker[dev_id].last_remain;
	read = cb_tracker[dev_id].next_read;
	write = cb_tracker[dev_id].next_write;
	for (i = 0; i < max_packets; i++) {
		read &= mask;
		if (read == write)
			break;
		if (n_seg >= cb_tracker[dev_id].size_track[read]) {
			n_seg -= cb_tracker[dev_id].size_track[read];
			read++;
			nb_packet++;
		} else {
			break;
		}
	}
	cb_tracker[dev_id].next_read = read;
	cb_tracker[dev_id].last_remain = n_seg;
	return nb_packet;
}
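
/*
 * Usage sketch (not part of this file): the example's main.c registers these
 * callbacks on the vhost enqueue path roughly as below. Struct and field
 * names follow the DPDK 20.11 async vhost API and should be treated as an
 * assumption here.
 *
 *	struct rte_vhost_async_channel_ops channel_ops = {
 *		.transfer_data = ioat_transfer_data_cb,
 *		.check_completed_copies = ioat_check_completed_copies_cb,
 *	};
 *	struct rte_vhost_async_features f = { .async_inorder = 1,
 *					      .async_threshold = 256 };
 *
 *	rte_vhost_async_channel_register(vid, VIRTIO_RXQ, f.intval, &channel_ops);
 */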