2a2c2d72026eabfab5b2aa8d1c42ba004c4e8e6e
[dpdk.git] / examples / vhost / ioat.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2020 Intel Corporation
3  */
4
5 #include <sys/uio.h>
6 #ifdef RTE_RAW_IOAT
7 #include <rte_rawdev.h>
8 #include <rte_ioat_rawdev.h>
9
10 #include "ioat.h"
11 #include "main.h"
12
/* Per-vhost-device DMA binding table, indexed by vhost vid. */
struct dma_for_vhost dma_bind[MAX_VHOST_DEVICE];

/*
 * Tracks in-flight copies on one IOAT rawdev so completions (reported
 * per-segment by the hardware) can be folded back into per-packet counts.
 */
struct packet_tracker {
	/* Number of segments enqueued for each packet, ring-buffer style. */
	unsigned short size_track[MAX_ENQUEUED_SIZE];
	/* Ring read index: next packet whose completion we await. */
	unsigned short next_read;
	/* Ring write index: next free slot for a newly enqueued packet. */
	unsigned short next_write;
	/* Completed segments left over from the last poll that did not
	 * add up to a whole packet yet. */
	unsigned short last_remain;
	/* Free descriptors remaining in the IOAT ring. */
	unsigned short ioat_space;
};

/* One tracker per IOAT rawdev, indexed by rawdev dev_id.
 * NOTE(review): array is sized MAX_VHOST_DEVICE, but the index is a
 * rawdev id — assumes dev_id < MAX_VHOST_DEVICE; verify. */
struct packet_tracker cb_tracker[MAX_VHOST_DEVICE];
24
25 int
26 open_ioat(const char *value)
27 {
28         struct dma_for_vhost *dma_info = dma_bind;
29         char *input = strndup(value, strlen(value) + 1);
30         char *addrs = input;
31         char *ptrs[2];
32         char *start, *end, *substr;
33         int64_t vid, vring_id;
34         struct rte_ioat_rawdev_config config;
35         struct rte_rawdev_info info = { .dev_private = &config };
36         char name[32];
37         int dev_id;
38         int ret = 0;
39         uint16_t i = 0;
40         char *dma_arg[MAX_VHOST_DEVICE];
41         int args_nr;
42
43         while (isblank(*addrs))
44                 addrs++;
45         if (*addrs == '\0') {
46                 ret = -1;
47                 goto out;
48         }
49
50         /* process DMA devices within bracket. */
51         addrs++;
52         substr = strtok(addrs, ";]");
53         if (!substr) {
54                 ret = -1;
55                 goto out;
56         }
57         args_nr = rte_strsplit(substr, strlen(substr),
58                         dma_arg, MAX_VHOST_DEVICE, ',');
59         if (args_nr <= 0) {
60                 ret = -1;
61                 goto out;
62         }
63         while (i < args_nr) {
64                 char *arg_temp = dma_arg[i];
65                 uint8_t sub_nr;
66                 sub_nr = rte_strsplit(arg_temp, strlen(arg_temp), ptrs, 2, '@');
67                 if (sub_nr != 2) {
68                         ret = -1;
69                         goto out;
70                 }
71
72                 start = strstr(ptrs[0], "txd");
73                 if (start == NULL) {
74                         ret = -1;
75                         goto out;
76                 }
77
78                 start += 3;
79                 vid = strtol(start, &end, 0);
80                 if (end == start) {
81                         ret = -1;
82                         goto out;
83                 }
84
85                 vring_id = 0 + VIRTIO_RXQ;
86                 if (rte_pci_addr_parse(ptrs[1],
87                                 &(dma_info + vid)->dmas[vring_id].addr) < 0) {
88                         ret = -1;
89                         goto out;
90                 }
91
92                 rte_pci_device_name(&(dma_info + vid)->dmas[vring_id].addr,
93                                 name, sizeof(name));
94                 dev_id = rte_rawdev_get_dev_id(name);
95                 if (dev_id == (uint16_t)(-ENODEV) ||
96                 dev_id == (uint16_t)(-EINVAL)) {
97                         ret = -1;
98                         goto out;
99                 }
100
101                 if (rte_rawdev_info_get(dev_id, &info, sizeof(config)) < 0 ||
102                 strstr(info.driver_name, "ioat") == NULL) {
103                         ret = -1;
104                         goto out;
105                 }
106
107                 (dma_info + vid)->dmas[vring_id].dev_id = dev_id;
108                 (dma_info + vid)->dmas[vring_id].is_valid = true;
109                 config.ring_size = IOAT_RING_SIZE;
110                 config.hdls_disable = true;
111                 if (rte_rawdev_configure(dev_id, &info, sizeof(config)) < 0) {
112                         ret = -1;
113                         goto out;
114                 }
115                 rte_rawdev_start(dev_id);
116                 cb_tracker[dev_id].ioat_space = IOAT_RING_SIZE - 1;
117                 dma_info->nr++;
118                 i++;
119         }
120 out:
121         free(input);
122         return ret;
123 }
124
/*
 * vhost async-channel transfer callback: enqueue the scatter-gather
 * copies described by @descs onto the IOAT rawdev bound to this
 * (vid, queue_id) pair.
 *
 * Returns the number of descriptors fully enqueued; returns -1 (as
 * uint32_t) when @opaque_data is set, since per-op completion data is
 * not supported here.
 */
uint32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
		struct rte_vhost_async_desc *descs,
		struct rte_vhost_async_status *opaque_data, uint16_t count)
{
	uint32_t i_desc;
	/* queue_id * 2 + VIRTIO_RXQ maps the vhost virtqueue pair to the
	 * RX slot recorded by open_ioat(). */
	uint16_t dev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id;
	struct rte_vhost_iov_iter *src = NULL;
	struct rte_vhost_iov_iter *dst = NULL;
	unsigned long i_seg;
	/* MAX_ENQUEUED_SIZE is a power of two, so & mask wraps the ring. */
	unsigned short mask = MAX_ENQUEUED_SIZE - 1;
	unsigned short write = cb_tracker[dev_id].next_write;

	if (!opaque_data) {
		for (i_desc = 0; i_desc < count; i_desc++) {
			src = descs[i_desc].src;
			dst = descs[i_desc].dst;
			i_seg = 0;
			/* Stop early if the IOAT ring cannot hold every
			 * segment of this descriptor; partial packets are
			 * never enqueued. */
			if (cb_tracker[dev_id].ioat_space < src->nr_segs)
				break;
			while (i_seg < src->nr_segs) {
				rte_ioat_enqueue_copy(dev_id,
					(uintptr_t)(src->iov[i_seg].iov_base)
						+ src->offset,
					(uintptr_t)(dst->iov[i_seg].iov_base)
						+ dst->offset,
					src->iov[i_seg].iov_len,
					0,
					0);
				i_seg++;
			}
			/* Record the segment count so the completion poll can
			 * translate completed segments back into packets. */
			write &= mask;
			cb_tracker[dev_id].size_track[write] = src->nr_segs;
			cb_tracker[dev_id].ioat_space -= src->nr_segs;
			write++;
		}
	} else {
		/* Opaque data is not supported */
		return -1;
	}
	/* ring the doorbell */
	rte_ioat_perform_ops(dev_id);
	cb_tracker[dev_id].next_write = write;
	return i_desc;
}
170
/*
 * vhost async-channel completion callback: poll the IOAT rawdev bound
 * to (vid, queue_id) and report how many whole packets have finished.
 *
 * The hardware completes individual segment copies; this routine walks
 * the size_track ring written by ioat_transfer_data_cb() and counts a
 * packet done only when all of its segments have completed, carrying
 * any leftover segments over to the next poll via last_remain.
 *
 * Returns the number of completed packets (0 on poll failure or when
 * nothing completed); returns -1 (as uint32_t) when @opaque_data is
 * set, which is not supported.
 */
uint32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
		struct rte_vhost_async_status *opaque_data,
		uint16_t max_packets)
{
	if (!opaque_data) {
		/* Scratch buffer for completion handles we do not use;
		 * 255 bounds how many ops one poll can drain. */
		uintptr_t dump[255];
		int n_seg;
		unsigned short read, write;
		unsigned short nb_packet = 0;
		unsigned short mask = MAX_ENQUEUED_SIZE - 1;
		unsigned short i;

		uint16_t dev_id = dma_bind[vid].dmas[queue_id * 2
				+ VIRTIO_RXQ].dev_id;
		n_seg = rte_ioat_completed_ops(dev_id, 255, NULL, NULL, dump, dump);
		if (n_seg < 0) {
			RTE_LOG(ERR,
				VHOST_DATA,
				"fail to poll completed buf on IOAT device %u",
				dev_id);
			return 0;
		}
		if (n_seg == 0)
			return 0;

		/* Completed segments free their IOAT ring slots. */
		cb_tracker[dev_id].ioat_space += n_seg;
		/* Fold in segments completed earlier that did not yet make
		 * up a full packet. */
		n_seg += cb_tracker[dev_id].last_remain;

		read = cb_tracker[dev_id].next_read;
		write = cb_tracker[dev_id].next_write;
		for (i = 0; i < max_packets; i++) {
			read &= mask;
			/* read == write means the tracker ring is empty. */
			if (read == write)
				break;
			/* A packet counts as done only when every one of its
			 * segments has completed. */
			if (n_seg >= cb_tracker[dev_id].size_track[read]) {
				n_seg -= cb_tracker[dev_id].size_track[read];
				read++;
				nb_packet++;
			} else {
				break;
			}
		}
		cb_tracker[dev_id].next_read = read;
		cb_tracker[dev_id].last_remain = n_seg;
		return nb_packet;
	}
	/* Opaque data is not supported */
	return -1;
}
221
222 #endif /* RTE_RAW_IOAT */