examples/vhost: fix string split error handling
[dpdk.git] / examples / vhost / ioat.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2020 Intel Corporation
3  */
4 #include <rte_rawdev.h>
5 #include <rte_ioat_rawdev.h>
6 #include <sys/uio.h>
7
8 #include "ioat.h"
9 #include "main.h"
10
/* Per-vhost-device DMA bindings, indexed by vhost device id (vid). */
struct dma_for_vhost dma_bind[MAX_VHOST_DEVICE];

/*
 * Ring-style bookkeeping of in-flight copies for one IOAT device.
 * Each enqueued packet occupies one slot recording how many segment
 * copies it was split into, so segment completions can be converted
 * back into completed-packet counts.
 */
struct packet_tracker {
        unsigned short size_track[MAX_ENQUEUED_SIZE]; /* segments per packet slot */
        unsigned short next_read;   /* next slot to consume on completion */
        unsigned short next_write;  /* next slot to fill on enqueue */
        unsigned short last_remain; /* completed segments not yet matched to a full packet */
};

/* NOTE(review): indexed by rawdev dev_id in the callbacks below, yet sized
 * MAX_VHOST_DEVICE — assumes every dev_id < MAX_VHOST_DEVICE; confirm. */
struct packet_tracker cb_tracker[MAX_VHOST_DEVICE];
22
23 int
24 open_ioat(const char *value)
25 {
26         struct dma_for_vhost *dma_info = dma_bind;
27         char *input = strndup(value, strlen(value) + 1);
28         char *addrs = input;
29         char *ptrs[2];
30         char *start, *end, *substr;
31         int64_t vid, vring_id;
32         struct rte_ioat_rawdev_config config;
33         struct rte_rawdev_info info = { .dev_private = &config };
34         char name[32];
35         int dev_id;
36         int ret = 0;
37         uint16_t i = 0;
38         char *dma_arg[MAX_VHOST_DEVICE];
39         int args_nr;
40
41         while (isblank(*addrs))
42                 addrs++;
43         if (*addrs == '\0') {
44                 ret = -1;
45                 goto out;
46         }
47
48         /* process DMA devices within bracket. */
49         addrs++;
50         substr = strtok(addrs, ";]");
51         if (!substr) {
52                 ret = -1;
53                 goto out;
54         }
55         args_nr = rte_strsplit(substr, strlen(substr),
56                         dma_arg, MAX_VHOST_DEVICE, ',');
57         if (args_nr <= 0) {
58                 ret = -1;
59                 goto out;
60         }
61         while (i < args_nr) {
62                 char *arg_temp = dma_arg[i];
63                 uint8_t sub_nr;
64                 sub_nr = rte_strsplit(arg_temp, strlen(arg_temp), ptrs, 2, '@');
65                 if (sub_nr != 2) {
66                         ret = -1;
67                         goto out;
68                 }
69
70                 start = strstr(ptrs[0], "txd");
71                 if (start == NULL) {
72                         ret = -1;
73                         goto out;
74                 }
75
76                 start += 3;
77                 vid = strtol(start, &end, 0);
78                 if (end == start) {
79                         ret = -1;
80                         goto out;
81                 }
82
83                 vring_id = 0 + VIRTIO_RXQ;
84                 if (rte_pci_addr_parse(ptrs[1],
85                                 &(dma_info + vid)->dmas[vring_id].addr) < 0) {
86                         ret = -1;
87                         goto out;
88                 }
89
90                 rte_pci_device_name(&(dma_info + vid)->dmas[vring_id].addr,
91                                 name, sizeof(name));
92                 dev_id = rte_rawdev_get_dev_id(name);
93                 if (dev_id == (uint16_t)(-ENODEV) ||
94                 dev_id == (uint16_t)(-EINVAL)) {
95                         ret = -1;
96                         goto out;
97                 }
98
99                 if (rte_rawdev_info_get(dev_id, &info, sizeof(config)) < 0 ||
100                 strstr(info.driver_name, "ioat") == NULL) {
101                         ret = -1;
102                         goto out;
103                 }
104
105                 (dma_info + vid)->dmas[vring_id].dev_id = dev_id;
106                 (dma_info + vid)->dmas[vring_id].is_valid = true;
107                 config.ring_size = IOAT_RING_SIZE;
108                 config.hdls_disable = true;
109                 if (rte_rawdev_configure(dev_id, &info, sizeof(config)) < 0) {
110                         ret = -1;
111                         goto out;
112                 }
113                 rte_rawdev_start(dev_id);
114
115                 dma_info->nr++;
116                 i++;
117         }
118 out:
119         free(input);
120         return ret;
121 }
122
/*
 * vhost async-copy enqueue callback: submit every segment of each
 * descriptor as an IOAT copy on the rawdev bound to this (vid, queue)
 * RX virtqueue, then ring the doorbell once for the whole batch.
 *
 * Returns the number of descriptors processed, or (uint32_t)-1 when
 * opaque_data is supplied (not supported).
 */
uint32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
                struct rte_vhost_async_desc *descs,
                struct rte_vhost_async_status *opaque_data, uint16_t count)
{
        uint32_t i_desc;
        /* queue_id is a virtqueue pair index; pick its RX queue's device. */
        int dev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id;
        struct rte_vhost_iov_iter *src = NULL;
        struct rte_vhost_iov_iter *dst = NULL;
        unsigned long i_seg;
        /* MAX_ENQUEUED_SIZE is assumed to be a power of two. */
        unsigned short mask = MAX_ENQUEUED_SIZE - 1;
        unsigned short write = cb_tracker[dev_id].next_write;

        if (!opaque_data) {
                for (i_desc = 0; i_desc < count; i_desc++) {
                        src = descs[i_desc].src;
                        dst = descs[i_desc].dst;
                        i_seg = 0;
                        while (i_seg < src->nr_segs) {
                                /*
                                 * TODO: Assuming that the ring space of the
                                 * IOAT device is large enough, so there is no
                                 * error here, and the actual error handling
                                 * will be added later.
                                 */
                                rte_ioat_enqueue_copy(dev_id,
                                        (uintptr_t)(src->iov[i_seg].iov_base)
                                                + src->offset,
                                        (uintptr_t)(dst->iov[i_seg].iov_base)
                                                + dst->offset,
                                        src->iov[i_seg].iov_len,
                                        0,
                                        0);
                                i_seg++;
                        }
                        /* Record this packet's segment count in its slot;
                         * the slot index wraps via the mask, the stored
                         * next_write below stays unmasked.
                         * NOTE(review): the completion callback compares a
                         * masked read index against this raw write value —
                         * verify full/empty handling when the ring wraps. */
                        write &= mask;
                        cb_tracker[dev_id].size_track[write] = i_seg;
                        write++;
                }
        } else {
                /* Opaque data is not supported */
                return -1;
        }
        /* ring the doorbell */
        rte_ioat_perform_ops(dev_id);
        cb_tracker[dev_id].next_write = write;
        return i_desc;
}
171
/*
 * vhost async-copy completion callback: drain completed segment copies
 * from the IOAT device and convert them into a count of fully
 * completed packets, using the per-device packet_tracker ring.
 *
 * Returns the number of packets whose every segment has completed
 * (capped at max_packets), or (uint32_t)-1 when opaque_data is
 * supplied (not supported).
 */
uint32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
                struct rte_vhost_async_status *opaque_data,
                uint16_t max_packets)
{
        if (!opaque_data) {
                uintptr_t dump[255];
                unsigned short n_seg;
                unsigned short read, write;
                unsigned short nb_packet = 0;
                unsigned short mask = MAX_ENQUEUED_SIZE - 1;
                unsigned short i;
                int dev_id = dma_bind[vid].dmas[queue_id * 2
                                + VIRTIO_RXQ].dev_id;
                /* Completed handles are discarded into dump[]; only the
                 * count of finished segment copies matters here. */
                n_seg = rte_ioat_completed_ops(dev_id, 255, dump, dump);
                /* Add segments completed earlier that did not yet finish
                 * a whole packet. */
                n_seg += cb_tracker[dev_id].last_remain;
                if (!n_seg)
                        return 0;
                read = cb_tracker[dev_id].next_read;
                write = cb_tracker[dev_id].next_write;
                for (i = 0; i < max_packets; i++) {
                        /* NOTE(review): read is masked but write is the raw
                         * counter from the enqueue side — confirm this
                         * emptiness test behaves once next_write wraps. */
                        read &= mask;
                        if (read == write)
                                break;
                        /* A packet counts as done only when all of its
                         * recorded segments have completed. */
                        if (n_seg >= cb_tracker[dev_id].size_track[read]) {
                                n_seg -= cb_tracker[dev_id].size_track[read];
                                read++;
                                nb_packet++;
                        } else {
                                break;
                        }
                }
                cb_tracker[dev_id].next_read = read;
                /* Carry leftover segments into the next invocation. */
                cb_tracker[dev_id].last_remain = n_seg;
                return nb_packet;
        }
        /* Opaque data is not supported */
        return -1;
}