dbad28d43eb3022524b7cdefb3fa55bc52bbd91d
[dpdk.git] / examples / vhost / ioat.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2020 Intel Corporation
3  */
4
5 #include <sys/uio.h>
6 #ifdef RTE_RAW_IOAT
7 #include <rte_rawdev.h>
8 #include <rte_ioat_rawdev.h>
9
10 #include "ioat.h"
11 #include "main.h"
12
/* Per-vhost-device DMA bindings, indexed by vhost device id (vid). */
struct dma_for_vhost dma_bind[MAX_VHOST_DEVICE];

/*
 * Software shadow of one IOAT rawdev's copy ring, used to map completed
 * segment counts back to whole packets.
 */
struct packet_tracker {
        /* nr_segs of each enqueued packet, ring-indexed by next_write/next_read */
        unsigned short size_track[MAX_ENQUEUED_SIZE];
        unsigned short next_read;       /* oldest packet not yet reported done */
        unsigned short next_write;      /* slot for the next enqueued packet */
        unsigned short last_remain;     /* completed segs left over after the
                                         * last partial-packet accounting pass
                                         */
        unsigned short ioat_space;      /* free descriptors left in the rawdev
                                         * ring (set to IOAT_RING_SIZE at start,
                                         * decremented on enqueue, replenished
                                         * on completion)
                                         */
};

/* NOTE(review): indexed by rawdev dev_id but sized MAX_VHOST_DEVICE —
 * assumes every dev_id < MAX_VHOST_DEVICE; verify against rawdev ids.
 */
struct packet_tracker cb_tracker[MAX_VHOST_DEVICE];
24
25
26 int
27 open_ioat(const char *value)
28 {
29         struct dma_for_vhost *dma_info = dma_bind;
30         char *input = strndup(value, strlen(value) + 1);
31         char *addrs = input;
32         char *ptrs[2];
33         char *start, *end, *substr;
34         int64_t vid, vring_id;
35         struct rte_ioat_rawdev_config config;
36         struct rte_rawdev_info info = { .dev_private = &config };
37         char name[32];
38         int dev_id;
39         int ret = 0;
40         uint16_t i = 0;
41         char *dma_arg[MAX_VHOST_DEVICE];
42         int args_nr;
43
44         while (isblank(*addrs))
45                 addrs++;
46         if (*addrs == '\0') {
47                 ret = -1;
48                 goto out;
49         }
50
51         /* process DMA devices within bracket. */
52         addrs++;
53         substr = strtok(addrs, ";]");
54         if (!substr) {
55                 ret = -1;
56                 goto out;
57         }
58         args_nr = rte_strsplit(substr, strlen(substr),
59                         dma_arg, MAX_VHOST_DEVICE, ',');
60         if (args_nr <= 0) {
61                 ret = -1;
62                 goto out;
63         }
64         while (i < args_nr) {
65                 char *arg_temp = dma_arg[i];
66                 uint8_t sub_nr;
67                 sub_nr = rte_strsplit(arg_temp, strlen(arg_temp), ptrs, 2, '@');
68                 if (sub_nr != 2) {
69                         ret = -1;
70                         goto out;
71                 }
72
73                 start = strstr(ptrs[0], "txd");
74                 if (start == NULL) {
75                         ret = -1;
76                         goto out;
77                 }
78
79                 start += 3;
80                 vid = strtol(start, &end, 0);
81                 if (end == start) {
82                         ret = -1;
83                         goto out;
84                 }
85
86                 vring_id = 0 + VIRTIO_RXQ;
87                 if (rte_pci_addr_parse(ptrs[1],
88                                 &(dma_info + vid)->dmas[vring_id].addr) < 0) {
89                         ret = -1;
90                         goto out;
91                 }
92
93                 rte_pci_device_name(&(dma_info + vid)->dmas[vring_id].addr,
94                                 name, sizeof(name));
95                 dev_id = rte_rawdev_get_dev_id(name);
96                 if (dev_id == (uint16_t)(-ENODEV) ||
97                 dev_id == (uint16_t)(-EINVAL)) {
98                         ret = -1;
99                         goto out;
100                 }
101
102                 if (rte_rawdev_info_get(dev_id, &info, sizeof(config)) < 0 ||
103                 strstr(info.driver_name, "ioat") == NULL) {
104                         ret = -1;
105                         goto out;
106                 }
107
108                 (dma_info + vid)->dmas[vring_id].dev_id = dev_id;
109                 (dma_info + vid)->dmas[vring_id].is_valid = true;
110                 config.ring_size = IOAT_RING_SIZE;
111                 config.hdls_disable = true;
112                 if (rte_rawdev_configure(dev_id, &info, sizeof(config)) < 0) {
113                         ret = -1;
114                         goto out;
115                 }
116                 rte_rawdev_start(dev_id);
117                 cb_tracker[dev_id].ioat_space = IOAT_RING_SIZE;
118                 dma_info->nr++;
119                 i++;
120         }
121 out:
122         free(input);
123         return ret;
124 }
125
126 uint32_t
127 ioat_transfer_data_cb(int vid, uint16_t queue_id,
128                 struct rte_vhost_async_desc *descs,
129                 struct rte_vhost_async_status *opaque_data, uint16_t count)
130 {
131         uint32_t i_desc;
132         int dev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id;
133         struct rte_vhost_iov_iter *src = NULL;
134         struct rte_vhost_iov_iter *dst = NULL;
135         unsigned long i_seg;
136         unsigned short mask = MAX_ENQUEUED_SIZE - 1;
137         unsigned short write = cb_tracker[dev_id].next_write;
138
139         if (!opaque_data) {
140                 for (i_desc = 0; i_desc < count; i_desc++) {
141                         src = descs[i_desc].src;
142                         dst = descs[i_desc].dst;
143                         i_seg = 0;
144                         if (cb_tracker[dev_id].ioat_space < src->nr_segs)
145                                 break;
146                         while (i_seg < src->nr_segs) {
147                                 rte_ioat_enqueue_copy(dev_id,
148                                         (uintptr_t)(src->iov[i_seg].iov_base)
149                                                 + src->offset,
150                                         (uintptr_t)(dst->iov[i_seg].iov_base)
151                                                 + dst->offset,
152                                         src->iov[i_seg].iov_len,
153                                         0,
154                                         0);
155                                 i_seg++;
156                         }
157                         write &= mask;
158                         cb_tracker[dev_id].size_track[write] = src->nr_segs;
159                         cb_tracker[dev_id].ioat_space -= src->nr_segs;
160                         write++;
161                 }
162         } else {
163                 /* Opaque data is not supported */
164                 return -1;
165         }
166         /* ring the doorbell */
167         rte_ioat_perform_ops(dev_id);
168         cb_tracker[dev_id].next_write = write;
169         return i_desc;
170 }
171
/*
 * Vhost async "check completed" callback: drain finished copy operations
 * from the queue's IOAT rawdev and convert the completed segment count
 * into a count of fully-completed packets, using the per-packet segment
 * sizes recorded by ioat_transfer_data_cb.  Segments belonging to a
 * packet that is not yet fully done are carried over in last_remain.
 *
 * Returns the number of packets completed (capped at max_packets), or
 * -1 (as uint32_t) when opaque_data is supplied (unsupported).
 */
uint32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
		struct rte_vhost_async_status *opaque_data,
		uint16_t max_packets)
{
	if (!opaque_data) {
		/* Completion handles are discarded (hdls_disable mode);
		 * dump is just scratch space for the API call.
		 */
		uintptr_t dump[255];
		int n_seg;
		unsigned short read, write;
		unsigned short nb_packet = 0;
		unsigned short mask = MAX_ENQUEUED_SIZE - 1;
		unsigned short i;

		int dev_id = dma_bind[vid].dmas[queue_id * 2
				+ VIRTIO_RXQ].dev_id;
		/* NOTE(review): a negative (error) return from
		 * rte_ioat_completed_ops is folded into "0 packets done"
		 * here — confirm errors need no distinct handling.
		 */
		n_seg = rte_ioat_completed_ops(dev_id, 255, dump, dump);
		if (n_seg <= 0)
			return 0;

		/* Completed descriptors free ring space for new enqueues;
		 * include segments left over from the previous pass.
		 */
		cb_tracker[dev_id].ioat_space += n_seg;
		n_seg += cb_tracker[dev_id].last_remain;

		read = cb_tracker[dev_id].next_read;
		write = cb_tracker[dev_id].next_write;
		for (i = 0; i < max_packets; i++) {
			read &= mask;
			if (read == write)
				break;
			/* A packet is done only when all of its recorded
			 * segments have completed.
			 */
			if (n_seg >= cb_tracker[dev_id].size_track[read]) {
				n_seg -= cb_tracker[dev_id].size_track[read];
				read++;
				nb_packet++;
			} else {
				break;
			}
		}
		cb_tracker[dev_id].next_read = read;
		/* Whatever segments did not complete a whole packet are
		 * credited to the next call.
		 */
		cb_tracker[dev_id].last_remain = n_seg;
		return nb_packet;
	}
	/* Opaque data is not supported */
	return -1;
}
215
216 #endif /* RTE_RAW_IOAT */