examples/vhost/ioat.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2020 Intel Corporation
 */
#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

#include <rte_rawdev.h>
#include <rte_ioat_rawdev.h>

#include "ioat.h"
#include "main.h"

/* DMA device bound to each vhost device, indexed by vid. */
struct dma_for_vhost dma_bind[MAX_VHOST_DEVICE];

/*
 * Per-device completion tracker: a ring of per-packet segment counts.
 * size_track[] records how many DMA segments each enqueued packet
 * occupies; next_read/next_write index that ring, and last_remain keeps
 * segments completed beyond the last fully finished packet.
 * MAX_ENQUEUED_SIZE must be a power of two, since the indices are
 * wrapped with a mask of MAX_ENQUEUED_SIZE - 1 below.
 */
struct packet_tracker {
	unsigned short size_track[MAX_ENQUEUED_SIZE];
	unsigned short next_read;
	unsigned short next_write;
	unsigned short last_remain;
};

struct packet_tracker cb_tracker[MAX_VHOST_DEVICE];

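/*
 * Parse the --dmas argument and bind an IOAT rawdev to each listed
 * vhost TX queue. Judging from the parsing below, the expected form is
 * a bracketed, semicolon-separated list of txd<vid>@<PCI address>
 * entries; a hypothetical example (values are illustrative only):
 *
 *	--dmas [txd0@0000:00:04.0;txd1@0000:00:04.1]
 *
 * Returns 0 on success, -1 on any parse or device-setup failure.
 */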
int
open_ioat(const char *value)
{
	struct dma_for_vhost *dma_info = dma_bind;
	char *input = strdup(value);
	char *addrs = input;
	char *ptrs[2];
	char *start, *end, *substr;
	int64_t vid, vring_id;
	struct rte_ioat_rawdev_config config;
	struct rte_rawdev_info info = { .dev_private = &config };
	char name[32];
	int dev_id;
	int ret = 0;
	uint16_t i = 0;
	char *dma_arg[MAX_VHOST_DEVICE];
	uint8_t args_nr;

	if (input == NULL)
		return -1;

	while (isblank(*addrs))
		addrs++;
	if (*addrs == '\0') {
		ret = -1;
		goto out;
	}

	/* Skip the opening bracket and process the DMA devices inside. */
	addrs++;
	substr = strtok(addrs, ";]");
	if (!substr) {
		ret = -1;
		goto out;
	}
	args_nr = rte_strsplit(substr, strlen(substr),
			dma_arg, MAX_VHOST_DEVICE, ',');
	do {
		char *arg_temp = dma_arg[i];
		rte_strsplit(arg_temp, strlen(arg_temp), ptrs, 2, '@');

		start = strstr(ptrs[0], "txd");
		if (start == NULL) {
			ret = -1;
			goto out;
		}

		start += 3;
		vid = strtol(start, &end, 0);
		if (end == start) {
			ret = -1;
			goto out;
		}

		/*
		 * Queue 0's RX vring, matching the queue_id * 2 + VIRTIO_RXQ
		 * indexing used by the callbacks below.
		 */
		vring_id = 0 + VIRTIO_RXQ;
		if (rte_pci_addr_parse(ptrs[1],
				&(dma_info + vid)->dmas[vring_id].addr) < 0) {
			ret = -1;
			goto out;
		}

		rte_pci_device_name(&(dma_info + vid)->dmas[vring_id].addr,
				name, sizeof(name));
		dev_id = rte_rawdev_get_dev_id(name);
		if (dev_id == (uint16_t)(-ENODEV) ||
				dev_id == (uint16_t)(-EINVAL)) {
			ret = -1;
			goto out;
		}

		if (rte_rawdev_info_get(dev_id, &info, sizeof(config)) < 0 ||
				strstr(info.driver_name, "ioat") == NULL) {
			ret = -1;
			goto out;
		}

		(dma_info + vid)->dmas[vring_id].dev_id = dev_id;
		(dma_info + vid)->dmas[vring_id].is_valid = true;
		config.ring_size = IOAT_RING_SIZE;
		config.hdls_disable = true;
		if (rte_rawdev_configure(dev_id, &info, sizeof(config)) < 0) {
			ret = -1;
			goto out;
		}
		rte_rawdev_start(dev_id);

		dma_info->nr++;
		i++;
	} while (i < args_nr);
out:
	free(input);
	return ret;
}

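/*
 * Async transfer_data callback: enqueue every segment of each packet
 * descriptor as an IOAT copy and ring the doorbell once for the whole
 * batch. Each packet's segment count is recorded in cb_tracker so the
 * completion callback can convert finished segments back into packets.
 */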
uint32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
		struct rte_vhost_async_desc *descs,
		struct rte_vhost_async_status *opaque_data, uint16_t count)
{
	uint32_t i_desc;
	int dev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id;
	struct rte_vhost_iov_iter *src = NULL;
	struct rte_vhost_iov_iter *dst = NULL;
	unsigned long i_seg;
	unsigned short mask = MAX_ENQUEUED_SIZE - 1;
	unsigned short write = cb_tracker[dev_id].next_write;

	if (!opaque_data) {
		for (i_desc = 0; i_desc < count; i_desc++) {
			src = descs[i_desc].src;
			dst = descs[i_desc].dst;
			i_seg = 0;
			while (i_seg < src->nr_segs) {
				/*
				 * TODO: We assume the ring space of the IOAT
				 * device is large enough, so no error is
				 * checked here; actual error handling will
				 * be added later.
				 */
				rte_ioat_enqueue_copy(dev_id,
					(uintptr_t)(src->iov[i_seg].iov_base)
						+ src->offset,
					(uintptr_t)(dst->iov[i_seg].iov_base)
						+ dst->offset,
					src->iov[i_seg].iov_len,
					0,
					0);
				i_seg++;
			}
			/* Record this packet's segment count in the tracker ring. */
			write &= mask;
			cb_tracker[dev_id].size_track[write] = i_seg;
			write++;
		}
	} else {
		/* Opaque data is not supported */
		return -1;
	}
	/* ring the doorbell */
	rte_ioat_perform_ops(dev_id);
	cb_tracker[dev_id].next_write = write;
	return i_desc;
}

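/*
 * Async check_completed_copies callback: poll the IOAT device for
 * finished copies, then walk the tracker ring to count how many whole
 * packets those segments complete; leftover segments of a partially
 * finished packet are carried in last_remain until the next poll.
 */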
uint32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
		struct rte_vhost_async_status *opaque_data,
		uint16_t max_packets)
{
	if (!opaque_data) {
		uintptr_t dump[255];
		int ret;
		unsigned short n_seg;
		unsigned short read, write;
		unsigned short nb_packet = 0;
		unsigned short mask = MAX_ENQUEUED_SIZE - 1;
		unsigned short i;
		int dev_id = dma_bind[vid].dmas[queue_id * 2
				+ VIRTIO_RXQ].dev_id;
		/*
		 * With handles disabled, dump[] is only scratch space.
		 * rte_ioat_completed_ops() returns -1 on failure; guard
		 * against folding that into the unsigned segment count.
		 */
		ret = rte_ioat_completed_ops(dev_id, 255, dump, dump);
		if (ret < 0)
			return -1;
		n_seg = ret + cb_tracker[dev_id].last_remain;
		if (!n_seg)
			return 0;
		read = cb_tracker[dev_id].next_read;
		write = cb_tracker[dev_id].next_write;
		for (i = 0; i < max_packets; i++) {
			read &= mask;
			if (read == write)
				break;
			/* A packet completes only when all its segments have. */
			if (n_seg >= cb_tracker[dev_id].size_track[read]) {
				n_seg -= cb_tracker[dev_id].size_track[read];
				read++;
				nb_packet++;
			} else {
				break;
			}
		}
		cb_tracker[dev_id].next_read = read;
		cb_tracker[dev_id].last_remain = n_seg;
		return nb_packet;
	}
	/* Opaque data is not supported */
	return -1;
}
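/*
 * Usage sketch (an assumption, not part of this file): main.c in this
 * example is expected to hand these callbacks to the vhost async
 * channel roughly as follows; the feature values are illustrative.
 *
 *	struct rte_vhost_async_channel_ops channel_ops = {
 *		.transfer_data = ioat_transfer_data_cb,
 *		.check_completed_copies = ioat_check_completed_copies_cb,
 *	};
 *	struct rte_vhost_async_features f;
 *
 *	f.async_inorder = 1;
 *	f.async_threshold = 256;
 *	rte_vhost_async_channel_register(vid, VIRTIO_RXQ,
 *		f.intval, &channel_ops);
 */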