/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2020 Intel Corporation
 */

#ifdef RTE_RAW_IOAT
#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include <rte_rawdev.h>
#include <rte_ioat_rawdev.h>
#include <rte_pci.h>

/* example-local headers: struct dma_for_vhost, MAX_VHOST_DEVICE,
 * IOAT_RING_SIZE, MAX_ENQUEUED_SIZE, VIRTIO_RXQ, VHOST_DATA, ...
 */
#include "ioat.h"
#include "main.h"

struct dma_for_vhost dma_bind[MAX_VHOST_DEVICE];
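
/*
 * Per-IOAT-device bookkeeping for in-flight async copies.  Each enqueued
 * packet records its segment count in size_track[]; next_read/next_write
 * index that ring (masked with MAX_ENQUEUED_SIZE - 1), last_remain keeps
 * completed segments that do not yet add up to a whole packet, and
 * ioat_space counts the free slots left on the IOAT descriptor ring.
 */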
struct packet_tracker {
    unsigned short size_track[MAX_ENQUEUED_SIZE];
    unsigned short next_read;
    unsigned short next_write;
    unsigned short last_remain;
    unsigned short ioat_space;
};

struct packet_tracker cb_tracker[MAX_VHOST_DEVICE];
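
/*
 * Parse the DMA configuration string and bring up one IOAT rawdev per
 * vhost TX queue.  The expected form, recovered from the parsing below,
 * is a bracketed, comma-separated list of "txd<vid>@<PCI address>"
 * entries, e.g. (illustrative values):
 *
 *    [txd0@0000:00:04.0,txd1@0000:00:04.1]
 */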
int
open_ioat(const char *value)
{
    struct dma_for_vhost *dma_info = dma_bind;
    char *input = strndup(value, strlen(value) + 1);
    char *addrs = input;
    char *ptrs[2], name[32];
    char *start, *end, *substr;
    int64_t vid, vring_id;
    int64_t dev_id;
    int ret = 0;
    uint16_t i = 0;
    int args_nr;
    struct rte_ioat_rawdev_config config;
    struct rte_rawdev_info info = { .dev_private = &config };
    char *dma_arg[MAX_VHOST_DEVICE];

    /* skip leading blanks before the opening '[' */
    while (isblank(*addrs))
        addrs++;

    /* process DMA devices within bracket. */
    addrs++;
    substr = strtok(addrs, ";]");
    args_nr = rte_strsplit(substr, strlen(substr),
            dma_arg, MAX_VHOST_DEVICE, ',');

    while (i < args_nr) {
        char *arg_temp = dma_arg[i];
        uint8_t sub_nr;

        sub_nr = rte_strsplit(arg_temp, strlen(arg_temp), ptrs, 2, '@');
        if (sub_nr != 2) {
            ret = -1;
            goto out;
        }

        /* "txd<vid>" on the left of '@' names the vhost TX queue */
        start = strstr(ptrs[0], "txd");
        if (start == NULL) {
            ret = -1;
            goto out;
        }
        start += 3;
        vid = strtol(start, &end, 0);

        vring_id = 0 + VIRTIO_RXQ;
        if (rte_pci_addr_parse(ptrs[1],
                &(dma_info + vid)->dmas[vring_id].addr) < 0) {
            ret = -1;
            goto out;
        }

        rte_pci_device_name(&(dma_info + vid)->dmas[vring_id].addr,
                name, sizeof(name));
        dev_id = rte_rawdev_get_dev_id(name);
        if (dev_id == (uint16_t)(-ENODEV) ||
                dev_id == (uint16_t)(-EINVAL)) {
            ret = -1;
            goto out;
        }

        if (rte_rawdev_info_get(dev_id, &info, sizeof(config)) < 0 ||
                strstr(info.driver_name, "ioat") == NULL) {
            ret = -1;
            goto out;
        }

        (dma_info + vid)->dmas[vring_id].dev_id = dev_id;
        (dma_info + vid)->dmas[vring_id].is_valid = true;
        config.ring_size = IOAT_RING_SIZE;
        config.hdls_disable = true;
        if (rte_rawdev_configure(dev_id, &info, sizeof(config)) < 0) {
            ret = -1;
            goto out;
        }
        rte_rawdev_start(dev_id);
        cb_tracker[dev_id].ioat_space = IOAT_RING_SIZE - 1;
        i++;
    }
out:
    free(input);
    return ret;
}
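
/*
 * vhost async data-path callback: enqueue the scatter-gather copies for
 * "count" packets onto the IOAT device bound to this virtqueue.  One
 * rte_ioat_enqueue_copy() is issued per iovec segment, the per-packet
 * segment count is remembered in cb_tracker so completions can later be
 * folded back into whole packets, and the doorbell is rung once at the end.
 */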
uint32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
        struct rte_vhost_async_desc *descs,
        struct rte_vhost_async_status *opaque_data, uint16_t count)
{
    uint32_t i_desc;
    uint16_t dev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id;
    struct rte_vhost_iov_iter *src = NULL;
    struct rte_vhost_iov_iter *dst = NULL;
    unsigned long i_seg;
    unsigned short mask = MAX_ENQUEUED_SIZE - 1;
    unsigned short write = cb_tracker[dev_id].next_write;

    if (!opaque_data) {
        for (i_desc = 0; i_desc < count; i_desc++) {
            src = descs[i_desc].src;
            dst = descs[i_desc].dst;
            i_seg = 0;
            /* stop early if the IOAT ring cannot take this packet */
            if (cb_tracker[dev_id].ioat_space < src->nr_segs)
                break;
            while (i_seg < src->nr_segs) {
                rte_ioat_enqueue_copy(dev_id,
                    (uintptr_t)(src->iov[i_seg].iov_base)
                        + src->offset,
                    (uintptr_t)(dst->iov[i_seg].iov_base)
                        + dst->offset,
                    src->iov[i_seg].iov_len,
                    0, 0);
                i_seg++;
            }
            write &= mask;
            cb_tracker[dev_id].size_track[write] = src->nr_segs;
            cb_tracker[dev_id].ioat_space -= src->nr_segs;
            write++;
        }
    } else {
        /* Opaque data is not supported */
        return -1;
    }
    /* ring the doorbell */
    rte_ioat_perform_ops(dev_id);
    cb_tracker[dev_id].next_write = write;

    return i_desc;
}
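
/*
 * vhost async completion callback: poll the IOAT device for finished copy
 * descriptors and translate the completed segment count back into a number
 * of fully copied packets, using the per-packet segment counts recorded at
 * enqueue time.  Segments belonging to a partially finished packet are kept
 * in last_remain for the next poll.
 */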
uint32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
        struct rte_vhost_async_status *opaque_data,
        uint16_t max_packets)
{
    if (!opaque_data) {
        uintptr_t dump[255];
        int n_seg;
        unsigned short i;
        unsigned short read, write;
        unsigned short nb_packet = 0;
        unsigned short mask = MAX_ENQUEUED_SIZE - 1;
        uint16_t dev_id = dma_bind[vid].dmas[queue_id * 2
                + VIRTIO_RXQ].dev_id;

        n_seg = rte_ioat_completed_ops(dev_id, 255, NULL, NULL, dump, dump);
        if (n_seg < 0) {
            RTE_LOG(ERR, VHOST_DATA,
                "fail to poll completed buf on IOAT device %u",
                dev_id);
            return 0;
        }

        cb_tracker[dev_id].ioat_space += n_seg;
        n_seg += cb_tracker[dev_id].last_remain;

        read = cb_tracker[dev_id].next_read;
        write = cb_tracker[dev_id].next_write;
        for (i = 0; i < max_packets; i++) {
            read &= mask;
            if (read == write)
                break;
            if (n_seg >= cb_tracker[dev_id].size_track[read]) {
                n_seg -= cb_tracker[dev_id].size_track[read];
                read++;
                nb_packet++;
            } else {
                break;
            }
        }
        cb_tracker[dev_id].next_read = read;
        cb_tracker[dev_id].last_remain = n_seg;
        return nb_packet;
    }
    /* Opaque data is not supported */
    return -1;
}

#endif /* RTE_RAW_IOAT */
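
/*
 * Usage note (illustrative sketch, not part of this file): the example's
 * main application hands these two callbacks to the vhost library when a
 * new device is created.  Assuming the DPDK 21.02-era rte_vhost async API
 * (structure and function signatures changed in later releases, and
 * "features" below stands in for that release's async feature flags), the
 * registration looks roughly like:
 *
 *    struct rte_vhost_async_channel_ops channel_ops = {
 *        .transfer_data = ioat_transfer_data_cb,
 *        .check_completed_copies = ioat_check_completed_copies_cb,
 *    };
 *
 *    rte_vhost_async_channel_register(vid, VIRTIO_RXQ, features,
 *                                     &channel_ops);
 */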