/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2020 Intel Corporation
 */

#include <sys/uio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>

#include <rte_rawdev.h>
#include <rte_ioat_rawdev.h>
#include <rte_pci.h>
#include <rte_string_fns.h>

#include "ioat.h"
#include "main.h"
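
/*
 * DMA (IOAT) engines bound to each vhost device's virtqueues, filled in by
 * open_ioat() from the application's DMA device list argument.
 */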
struct dma_for_vhost dma_bind[MAX_VHOST_DEVICE];
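
/*
 * Per IOAT device completion tracking: a ring of per-packet segment counts.
 * next_write indexes the slot for the next enqueued packet, next_read the
 * oldest packet still in flight, and last_remain carries completed segments
 * that do not yet add up to a full packet. MAX_ENQUEUED_SIZE must be a
 * power of two so the indices can be wrapped with a simple mask.
 */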
struct packet_tracker {
	unsigned short size_track[MAX_ENQUEUED_SIZE];
	unsigned short next_read;
	unsigned short next_write;
	unsigned short last_remain;
};

struct packet_tracker cb_tracker[MAX_VHOST_DEVICE];
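
/*
 * Parse one DMA device list argument of the form
 * "[txd<vid>@<DMA device PCI address>;...]" and, for each entry, bind the
 * named IOAT rawdev to the enqueue (VIRTIO_RXQ) path of vhost device <vid>,
 * then configure and start that rawdev.
 */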
int
open_ioat(const char *value)
{
	struct dma_for_vhost *dma_info = dma_bind;
	char *input = strndup(value, strlen(value) + 1);
	char *addrs = input;
	char *ptrs[2];
	char *start, *end, *substr;
	char *dma_arg[MAX_VHOST_DEVICE];
	char name[32];
	int64_t vid, vring_id;
	struct rte_ioat_rawdev_config config;
	struct rte_rawdev_info info = { .dev_private = &config };
	int args_nr, sub_nr;
	int dev_id;
	int ret = 0;
	uint16_t i = 0;

	while (isblank(*addrs))
		addrs++;
	if (*addrs == '\0') {
		ret = -1;
		goto out;
	}

	/* process DMA devices within bracket. */
	addrs++;
	substr = strtok(addrs, ";]");
	if (!substr) {
		ret = -1;
		goto out;
	}

	args_nr = rte_strsplit(substr, strlen(substr),
			dma_arg, MAX_VHOST_DEVICE, ',');
	if (args_nr <= 0) {
		ret = -1;
		goto out;
	}

	while (i < args_nr) {
		char *arg_temp = dma_arg[i];

		/* each entry has the form "txd<vid>@<DMA device PCI address>" */
		sub_nr = rte_strsplit(arg_temp, strlen(arg_temp), ptrs, 2, '@');
		if (sub_nr != 2) {
			ret = -1;
			goto out;
		}

		start = strstr(ptrs[0], "txd");
		if (start == NULL) {
			ret = -1;
			goto out;
		}

		start += 3;
		vid = strtol(start, &end, 0);
		if (end == start || vid < 0 || vid >= MAX_VHOST_DEVICE) {
			ret = -1;
			goto out;
		}

		vring_id = 0 + VIRTIO_RXQ;
		if (rte_pci_addr_parse(ptrs[1],
				&(dma_info + vid)->dmas[vring_id].addr) < 0) {
			ret = -1;
			goto out;
		}

		rte_pci_device_name(&(dma_info + vid)->dmas[vring_id].addr,
				name, sizeof(name));
		dev_id = rte_rawdev_get_dev_id(name);
		if (dev_id == (uint16_t)(-ENODEV) ||
				dev_id == (uint16_t)(-EINVAL)) {
			ret = -1;
			goto out;
		}

		/* only accept rawdevs driven by an ioat driver */
		if (rte_rawdev_info_get(dev_id, &info, sizeof(config)) < 0 ||
				strstr(info.driver_name, "ioat") == NULL) {
			ret = -1;
			goto out;
		}

		(dma_info + vid)->dmas[vring_id].dev_id = dev_id;
		(dma_info + vid)->dmas[vring_id].is_valid = true;
		config.ring_size = IOAT_RING_SIZE;
		config.hdls_disable = true;
		if (rte_rawdev_configure(dev_id, &info, sizeof(config)) < 0) {
			ret = -1;
			goto out;
		}
		rte_rawdev_start(dev_id);
		i++;
	}
out:
	free(input);
	return ret;
}
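
/*
 * Async data-path callback: enqueue the segments of "count" packets on the
 * IOAT device bound to this queue, one copy per segment, and record how many
 * segments make up each packet so completions can later be converted back
 * into completed packets.
 */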
uint32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
		struct rte_vhost_async_desc *descs,
		struct rte_vhost_async_status *opaque_data, uint16_t count)
{
	uint32_t i_desc;
	unsigned long i_seg;
	int dev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id;
	struct rte_vhost_iov_iter *src = NULL;
	struct rte_vhost_iov_iter *dst = NULL;
	unsigned short mask = MAX_ENQUEUED_SIZE - 1;
	unsigned short write = cb_tracker[dev_id].next_write;

	if (!opaque_data) {
		for (i_desc = 0; i_desc < count; i_desc++) {
			src = descs[i_desc].src;
			dst = descs[i_desc].dst;
			i_seg = 0;
			while (i_seg < src->nr_segs) {
				/*
				 * TODO: we assume the ring space of the IOAT
				 * device is large enough, so the enqueue
				 * result is not checked here; real error
				 * handling will be added later.
				 */
				rte_ioat_enqueue_copy(dev_id,
					(uintptr_t)(src->iov[i_seg].iov_base)
						+ src->offset,
					(uintptr_t)(dst->iov[i_seg].iov_base)
						+ dst->offset,
					src->iov[i_seg].iov_len,
					0, 0);
				i_seg++;
			}
			write &= mask;
			cb_tracker[dev_id].size_track[write] = i_seg;
			write++;
		}
	} else {
		/* Opaque data is not supported */
		return -1;
	}
	/* ring the doorbell */
	rte_ioat_perform_ops(dev_id);
	cb_tracker[dev_id].next_write = write;
	return i_desc;
}
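
/*
 * Async completion callback: poll the IOAT device for finished copies and
 * translate the completed segment count into a number of fully completed
 * packets, carrying any partial remainder over to the next call.
 */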
uint32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
		struct rte_vhost_async_status *opaque_data,
		uint16_t max_packets)
{
	if (!opaque_data) {
		uintptr_t dump[255];
		unsigned short n_seg;
		unsigned short read, write;
		unsigned short nb_packet = 0;
		unsigned short mask = MAX_ENQUEUED_SIZE - 1;
		unsigned short i;
		int ret;
		int dev_id = dma_bind[vid].dmas[queue_id * 2
				+ VIRTIO_RXQ].dev_id;

		ret = rte_ioat_completed_ops(dev_id, 255, dump, dump);
		if (ret < 0)
			return -1;
		n_seg = ret;
		n_seg += cb_tracker[dev_id].last_remain;
		if (n_seg == 0)
			return 0;

		read = cb_tracker[dev_id].next_read;
		write = cb_tracker[dev_id].next_write;
		for (i = 0; i < max_packets; i++) {
			read &= mask;
			if (read == write)
				break;
			if (n_seg >= cb_tracker[dev_id].size_track[read]) {
				n_seg -= cb_tracker[dev_id].size_track[read];
				read++;
				nb_packet++;
			} else {
				break;
			}
		}
		cb_tracker[dev_id].next_read = read;
		cb_tracker[dev_id].last_remain = n_seg;
		return nb_packet;
	}
	/* Opaque data is not supported */
	return -1;
}