1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2020 Intel Corporation
7 #include <rte_rawdev.h>
8 #include <rte_ioat_rawdev.h>
/* per-vhost-device binding of virtqueues to IOAT DMA rawdevs */
13 struct dma_for_vhost dma_bind[MAX_VHOST_DEVICE];
/*
 * Bookkeeping for in-flight IOAT copy batches on one raw device, so that
 * completed copy segments can be matched back to whole enqueued packets.
 * NOTE(review): this chunk is truncated (embedded line numbers jump from
 * 20 to 23), so the struct's closing brace is not visible here.
 */
15 struct packet_tracker {
/* number of copy segments enqueued for each tracked packet slot */
16 unsigned short size_track[MAX_ENQUEUED_SIZE];
/* next slot to consume on completion polling */
17 unsigned short next_read;
/* next slot to fill when enqueuing a packet */
18 unsigned short next_write;
/* completed segments left over that did not finish a whole packet */
19 unsigned short last_remain;
/* remaining free entries in the IOAT descriptor ring */
20 unsigned short ioat_space;
/* one tracker per rawdev id -- indexed by dev_id below */
23 struct packet_tracker cb_tracker[MAX_VHOST_DEVICE];
/*
 * Parse the DMA-binding option string and attach each vhost TX queue
 * token ("txd<vid>") to the IOAT rawdev identified by a PCI address,
 * then configure and start that rawdev.
 * Presumably returns 0 on success and a negative value on parse/lookup/
 * configure failure -- the error-path statements are not visible here.
 * NOTE(review): SOURCE is truncated (embedded line numbers have gaps);
 * several declarations, braces and error-handling lines are missing.
 */
26 open_ioat(const char *value)
28 struct dma_for_vhost *dma_info = dma_bind;
/* strndup with strlen(value) + 1 duplicates the full string incl. NUL */
29 char *input = strndup(value, strlen(value) + 1);
32 char *start, *end, *substr;
33 int64_t vid, vring_id;
34 struct rte_ioat_rawdev_config config;
/* rawdev info query/configure both go through the same config struct */
35 struct rte_rawdev_info info = { .dev_private = &config };
40 char *dma_arg[MAX_VHOST_DEVICE];
/* skip leading blanks before the bracketed device list */
43 while (isblank(*addrs))
50 /* process DMA devices within bracket. */
/* cut out the bracket contents; entries are separated by ';' or ']' */
52 substr = strtok(addrs, ";]");
/* split into per-queue "txdN@<pci-addr>" tokens */
57 args_nr = rte_strsplit(substr, strlen(substr),
58 dma_arg, MAX_VHOST_DEVICE, ',');
64 char *arg_temp = dma_arg[i];
/* ptrs[0] = "txdN" queue tag, ptrs[1] = DMA device PCI address */
66 sub_nr = rte_strsplit(arg_temp, strlen(arg_temp), ptrs, 2, '@');
72 start = strstr(ptrs[0], "txd");
/* numeric vhost device id follows the "txd" prefix
 * (presumably `start` is advanced past "txd" in a missing line) */
79 vid = strtol(start, &end, 0);
/* only the RX virtqueue (guest-RX / host-TX direction) gets a DMA dev */
85 vring_id = 0 + VIRTIO_RXQ;
86 if (rte_pci_addr_parse(ptrs[1],
87 &(dma_info + vid)->dmas[vring_id].addr) < 0) {
/* resolve the parsed PCI address to a rawdev id via its device name */
92 rte_pci_device_name(&(dma_info + vid)->dmas[vring_id].addr,
94 dev_id = rte_rawdev_get_dev_id(name);
95 if (dev_id == (uint16_t)(-ENODEV) ||
96 dev_id == (uint16_t)(-EINVAL)) {
/* verify the rawdev is really driven by an ioat driver before use */
101 if (rte_rawdev_info_get(dev_id, &info, sizeof(config)) < 0 ||
102 strstr(info.driver_name, "ioat") == NULL) {
107 (dma_info + vid)->dmas[vring_id].dev_id = dev_id;
108 (dma_info + vid)->dmas[vring_id].is_valid = true;
109 config.ring_size = IOAT_RING_SIZE;
/* handles disabled: completion tracking is done via cb_tracker instead */
110 config.hdls_disable = true;
111 if (rte_rawdev_configure(dev_id, &info, sizeof(config)) < 0) {
115 rte_rawdev_start(dev_id);
/* usable space is ring size minus one -- presumably to distinguish
 * full from empty; confirm against the ioat rawdev driver */
116 cb_tracker[dev_id].ioat_space = IOAT_RING_SIZE - 1;
/*
 * Vhost async-channel transmit callback: enqueue every segment of each
 * iov_iter as an IOAT copy op on the DMA device bound to (vid, queue_id),
 * then ring the doorbell once for the whole batch.
 * Opaque data is not supported (see comment below). Presumably returns
 * the number of packets enqueued -- the return statement is not visible
 * in this truncated chunk (embedded line numbers have gaps).
 */
126 ioat_transfer_data_cb(int vid, uint16_t queue_id,
127 struct rte_vhost_iov_iter *iov_iter,
128 struct rte_vhost_async_status *opaque_data, uint16_t count)
/* queue_id*2 + VIRTIO_RXQ maps a queue pair index to its RX vring slot */
131 uint16_t dev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id;
132 struct rte_vhost_iov_iter *iter = NULL;
/* assumes MAX_ENQUEUED_SIZE is a power of two so this works as a
 * wrap-around mask -- TODO confirm */
134 unsigned short mask = MAX_ENQUEUED_SIZE - 1;
135 unsigned short write = cb_tracker[dev_id].next_write;
138 for (i_iter = 0; i_iter < count; i_iter++) {
139 iter = iov_iter + i_iter;
/* stop if the descriptor ring cannot hold this packet's segments */
141 if (cb_tracker[dev_id].ioat_space < iter->nr_segs)
143 while (i_seg < iter->nr_segs) {
/* one IOAT copy per iovec segment */
144 rte_ioat_enqueue_copy(dev_id,
145 (uintptr_t)(iter->iov[i_seg].src_addr),
146 (uintptr_t)(iter->iov[i_seg].dst_addr),
147 iter->iov[i_seg].len,
/* record how many segments make up the packet in this slot, so the
 * completion callback can count whole packets */
153 cb_tracker[dev_id].size_track[write] = iter->nr_segs;
154 cb_tracker[dev_id].ioat_space -= iter->nr_segs;
158 /* Opaque data is not supported */
161 /* ring the doorbell */
162 rte_ioat_perform_ops(dev_id);
163 cb_tracker[dev_id].next_write = write;
/*
 * Vhost async-channel completion callback: poll the IOAT device for
 * finished copy segments and convert the segment total back into the
 * number of fully completed packets using size_track[]. Segments that
 * do not finish a whole packet are carried over in last_remain.
 * NOTE(review): SOURCE is truncated; declarations of n_seg/dump/i, the
 * error path around the log message, loop braces and the return
 * statement(s) are not visible in this chunk.
 */
168 ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
169 struct rte_vhost_async_status *opaque_data,
170 uint16_t max_packets)
175 unsigned short read, write;
176 unsigned short nb_packet = 0;
/* assumes MAX_ENQUEUED_SIZE is a power of two (wrap mask) -- TODO confirm */
177 unsigned short mask = MAX_ENQUEUED_SIZE - 1;
/* same queue-pair -> RX vring mapping as the transfer callback */
180 uint16_t dev_id = dma_bind[vid].dmas[queue_id * 2
181 + VIRTIO_RXQ].dev_id;
/* drain up to 255 completed copy ops per call; handles are unused */
182 n_seg = rte_ioat_completed_ops(dev_id, 255, NULL, NULL, dump, dump);
186 "fail to poll completed buf on IOAT device %u",
/* completed segments free descriptor-ring space for future enqueues */
193 cb_tracker[dev_id].ioat_space += n_seg;
/* fold in segments left over from the previous poll */
194 n_seg += cb_tracker[dev_id].last_remain;
196 read = cb_tracker[dev_id].next_read;
197 write = cb_tracker[dev_id].next_write;
198 for (i = 0; i < max_packets; i++) {
/* a packet is complete once all of its tracked segments completed */
202 if (n_seg >= cb_tracker[dev_id].size_track[read]) {
203 n_seg -= cb_tracker[dev_id].size_track[read];
210 cb_tracker[dev_id].next_read = read;
/* stash the partial remainder for the next invocation */
211 cb_tracker[dev_id].last_remain = n_seg;
214 /* Opaque data is not supported */
218 #endif /* RTE_RAW_IOAT */