examples/vhost_blk: fix build with gcc 10
[dpdk.git] / examples / vhost_blk / vhost_blk.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2019 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <unistd.h>
7 #include <stdbool.h>
8 #include <signal.h>
9 #include <assert.h>
10 #include <semaphore.h>
11 #include <linux/virtio_blk.h>
12 #include <linux/virtio_ring.h>
13
14 #include <rte_atomic.h>
15 #include <rte_cycles.h>
16 #include <rte_log.h>
17 #include <rte_malloc.h>
18 #include <rte_vhost.h>
19
20 #include "vhost_blk.h"
21 #include "blk_spec.h"
22
23 #define VIRTQ_DESC_F_NEXT       1
24 #define VIRTQ_DESC_F_AVAIL      (1 << 7)
25 #define VIRTQ_DESC_F_USED       (1 << 15)
26
27 #define MAX_TASK                12
28
29 #define VHOST_BLK_FEATURES ((1ULL << VIRTIO_F_RING_PACKED) | \
30                             (1ULL << VIRTIO_F_VERSION_1) |\
31                             (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \
32                             (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
33
/* Single global controller instance; vhost_blk_ctrlr_find() always returns
 * this because only one vhost socket fd is currently supported.
 */
struct vhost_blk_ctrlr *g_vhost_ctrlr;

/* Path to folder where character device will be created. Can be set by user. */
static char dev_pathname[PATH_MAX] = "";
/* Posted by the worker thread on exit; presumably the main thread waits on
 * this during shutdown — the waiter is not visible in this chunk.
 */
static sem_t exit_sem;
/* Worker control flag: -1 before start, 0 while running, 2 once the worker
 * has exited (see ctrlr_worker).  NOTE(review): shared between threads as a
 * plain int without atomics/volatile — confirm this is adequate here.
 */
static int g_should_stop = -1;
40
41 struct vhost_blk_ctrlr *
42 vhost_blk_ctrlr_find(const char *ctrlr_name)
43 {
44         if (ctrlr_name == NULL)
45                 return NULL;
46
47         /* currently we only support 1 socket file fd */
48         return g_vhost_ctrlr;
49 }
50
51 static uint64_t gpa_to_vva(int vid, uint64_t gpa, uint64_t *len)
52 {
53         char path[PATH_MAX];
54         struct vhost_blk_ctrlr *ctrlr;
55         int ret = 0;
56
57         ret = rte_vhost_get_ifname(vid, path, PATH_MAX);
58         if (ret) {
59                 fprintf(stderr, "Cannot get socket name\n");
60                 assert(ret != 0);
61         }
62
63         ctrlr = vhost_blk_ctrlr_find(path);
64         if (!ctrlr) {
65                 fprintf(stderr, "Controller is not ready\n");
66                 assert(ctrlr != NULL);
67         }
68
69         assert(ctrlr->mem != NULL);
70
71         return rte_vhost_va_from_guest_pa(ctrlr->mem, gpa, len);
72 }
73
74 static struct vring_packed_desc *
75 descriptor_get_next_packed(struct rte_vhost_vring *vq,
76                              uint16_t *idx)
77 {
78         if (vq->desc_packed[*idx % vq->size].flags & VIRTQ_DESC_F_NEXT) {
79                 *idx += 1;
80                 return &vq->desc_packed[*idx % vq->size];
81         }
82
83         return NULL;
84 }
85
/* Return true when the packed descriptor continues a chain (NEXT flag set). */
static bool
descriptor_has_next_packed(struct vring_packed_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_NEXT) != 0;
}
91
/* Return true when the packed descriptor is device-writable (WRITE flag). */
static bool
descriptor_is_wr_packed(struct vring_packed_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_WRITE) != 0;
}
97
98 static struct rte_vhost_inflight_desc_packed *
99 inflight_desc_get_next(struct rte_vhost_inflight_info_packed *inflight_packed,
100                                struct rte_vhost_inflight_desc_packed *cur_desc)
101 {
102         if (!!(cur_desc->flags & VIRTQ_DESC_F_NEXT))
103                 return &inflight_packed->desc[cur_desc->next];
104
105         return NULL;
106 }
107
108 static bool
109 inflight_desc_has_next(struct rte_vhost_inflight_desc_packed *cur_desc)
110 {
111         return !!(cur_desc->flags & VRING_DESC_F_NEXT);
112 }
113
114 static bool
115 inflight_desc_is_wr(struct rte_vhost_inflight_desc_packed *cur_desc)
116 {
117         return !!(cur_desc->flags & VRING_DESC_F_WRITE);
118 }
119
/* Rebuild the iovec list for a resubmitted (inflight) packed-ring request.
 *
 * Walks the descriptor chain starting at task->inflight_desc, translating
 * each payload descriptor's guest physical address and appending it to
 * blk_task->iovs.  On exit task->inflight_desc points at the final (status)
 * descriptor, whose translated address is stored in blk_task->status.
 * On a translation failure an error is printed and the task is left only
 * partially populated.
 */
static void
inflight_process_payload_chain_packed(struct inflight_blk_task *task)
{
	void *data;
	uint64_t chunck_len;
	struct vhost_blk_task *blk_task;
	struct rte_vhost_inflight_desc_packed *desc;

	blk_task = &task->blk_task;
	blk_task->iovs_cnt = 0;

	do {
		desc = task->inflight_desc;
		chunck_len = desc->len;
		data = (void *)(uintptr_t)gpa_to_vva(blk_task->bdev->vid,
						     desc->addr,
						     &chunck_len);
		/* A shortened mapping means the buffer crosses a memory
		 * region boundary; treat it as a translation failure.
		 */
		if (!data || chunck_len != desc->len) {
			fprintf(stderr, "failed to translate desc address.\n");
			return;
		}

		blk_task->iovs[blk_task->iovs_cnt].iov_base = data;
		blk_task->iovs[blk_task->iovs_cnt].iov_len = desc->len;
		blk_task->data_len += desc->len;
		blk_task->iovs_cnt++;
		task->inflight_desc = inflight_desc_get_next(
					task->inflight_packed, desc);
		/* NOTE(review): inflight_desc_get_next() returns NULL when
		 * 'desc' lacks the NEXT flag, in which case the loop
		 * condition below would dereference NULL.  Presumably a
		 * well-formed request always ends with a separate status
		 * descriptor — confirm against the virtio-blk layout.
		 */
	} while (inflight_desc_has_next(task->inflight_desc));

	/* Chain tail: the status byte written back to the guest. */
	chunck_len = task->inflight_desc->len;
	blk_task->status = (void *)(uintptr_t)gpa_to_vva(
		blk_task->bdev->vid, task->inflight_desc->addr, &chunck_len);
	if (!blk_task->status || chunck_len != task->inflight_desc->len)
		fprintf(stderr, "failed to translate desc address.\n");
}
156
/* Complete a resubmitted inflight request on a packed ring.
 *
 * Marks the used descriptor at *used_id for the guest, advances the
 * caller's used index/wrap counter, updates the inflight bookkeeping and
 * finally kicks the guest.  *used_id and *used_wrap_counter are in/out:
 * the caller persists them back into its queue state.
 */
static void
inflight_submit_completion_packed(struct inflight_blk_task *task,
					      uint32_t q_idx, uint16_t *used_id,
					      bool *used_wrap_counter)
{
	struct vhost_blk_ctrlr *ctrlr;
	struct rte_vhost_vring *vq;
	struct vring_packed_desc *desc;
	int ret;

	ctrlr = vhost_blk_ctrlr_find(dev_pathname);
	vq = task->blk_task.vq;

	ret = rte_vhost_set_last_inflight_io_packed(ctrlr->bdev->vid, q_idx,
						    task->blk_task.head_idx);
	if (ret != 0)
		fprintf(stderr, "failed to set last inflight io\n");

	desc = &vq->desc_packed[*used_id];
	desc->id = task->blk_task.buffer_id;
	/* Barriers ensure the id store is visible before the flags flip,
	 * and the flags flip before any subsequent ring writes: the guest
	 * treats the AVAIL/USED flag pair as the publication point.
	 */
	rte_smp_mb();
	if (*used_wrap_counter)
		desc->flags |= VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED;
	else
		desc->flags &= ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
	rte_smp_mb();

	/* Skip past the whole chain: payload iovs plus the request header
	 * and status descriptors.
	 */
	*used_id += task->blk_task.iovs_cnt + 2;
	if (*used_id >= vq->size) {
		*used_id -= vq->size;
		*used_wrap_counter = !(*used_wrap_counter);
	}

	ret = rte_vhost_clr_inflight_desc_packed(ctrlr->bdev->vid, q_idx,
						 task->blk_task.head_idx);
	if (ret != 0)
		fprintf(stderr, "failed to clear inflight io\n");

	/* Send an interrupt back to the guest VM so that it knows
	 * a completion is ready to be processed.
	 */
	rte_vhost_vring_call(task->blk_task.bdev->vid, q_idx);
}
200
/* Complete a normally-processed request on a packed ring.
 *
 * Same mechanics as inflight_submit_completion_packed() but driven from a
 * plain vhost_blk_task: publish the used descriptor, advance the caller's
 * used index/wrap counter, clear the inflight entry and kick the guest.
 */
static void
submit_completion_packed(struct vhost_blk_task *task, uint32_t q_idx,
				  uint16_t *used_id, bool *used_wrap_counter)
{
	struct vhost_blk_ctrlr *ctrlr;
	struct rte_vhost_vring *vq;
	struct vring_packed_desc *desc;
	int ret;

	ctrlr = vhost_blk_ctrlr_find(dev_pathname);
	vq = task->vq;

	ret = rte_vhost_set_last_inflight_io_packed(ctrlr->bdev->vid, q_idx,
						    task->inflight_idx);
	if (ret != 0)
		fprintf(stderr, "failed to set last inflight io\n");

	desc = &vq->desc_packed[*used_id];
	desc->id = task->buffer_id;
	/* Publish id before flags; flags flip is what the guest polls. */
	rte_smp_mb();
	if (*used_wrap_counter)
		desc->flags |= VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED;
	else
		desc->flags &= ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
	rte_smp_mb();

	/* Advance past payload iovs + request header + status descriptors. */
	*used_id += task->iovs_cnt + 2;
	if (*used_id >= vq->size) {
		*used_id -= vq->size;
		*used_wrap_counter = !(*used_wrap_counter);
	}

	ret = rte_vhost_clr_inflight_desc_packed(ctrlr->bdev->vid, q_idx,
						 task->inflight_idx);
	if (ret != 0)
		fprintf(stderr, "failed to clear inflight io\n");

	/* Send an interrupt back to the guest VM so that it knows
	 * a completion is ready to be processed.
	 */
	rte_vhost_vring_call(task->bdev->vid, q_idx);
}
243
/* Build the iovec list for a live packed-ring request.
 *
 * Walks the chain from task->desc_packed, translating each payload
 * descriptor into task->iovs.  *idx is the caller's running avail index
 * and is advanced by descriptor_get_next_packed(); on exit task->last_idx
 * records the chain tail slot and task->status points at the translated
 * status byte.  On translation failure an error is printed and the task
 * is left partially populated.
 */
static void
vhost_process_payload_chain_packed(struct vhost_blk_task *task,
	uint16_t *idx)
{
	void *data;
	uint64_t chunck_len;

	task->iovs_cnt = 0;

	do {
		chunck_len = task->desc_packed->len;
		data = (void *)(uintptr_t)gpa_to_vva(task->bdev->vid,
						     task->desc_packed->addr,
							 &chunck_len);
		/* Shortened mapping == buffer crosses a region boundary. */
		if (!data || chunck_len != task->desc_packed->len) {
			fprintf(stderr, "failed to translate desc address.\n");
			return;
		}

		task->iovs[task->iovs_cnt].iov_base = data;
		task->iovs[task->iovs_cnt].iov_len = task->desc_packed->len;
		task->data_len += task->desc_packed->len;
		task->iovs_cnt++;
		task->desc_packed = descriptor_get_next_packed(task->vq, idx);
		/* NOTE(review): descriptor_get_next_packed() can return NULL
		 * if a mid-chain descriptor lacks the NEXT flag; the loop
		 * condition would then dereference NULL.  Presumably valid
		 * requests always end with a status descriptor — confirm.
		 */
	} while (descriptor_has_next_packed(task->desc_packed));

	/* Record tail slot and translate the status byte. */
	task->last_idx = *idx % task->vq->size;
	chunck_len = task->desc_packed->len;
	task->status = (void *)(uintptr_t)gpa_to_vva(task->bdev->vid,
						   task->desc_packed->addr,
						   &chunck_len);
	if (!task->status || chunck_len != task->desc_packed->len)
		fprintf(stderr, "failed to translate desc address.\n");
}
278
279
280 static int
281 descriptor_is_available(struct rte_vhost_vring *vring, uint16_t idx,
282                                         bool avail_wrap_counter)
283 {
284         uint16_t flags = vring->desc_packed[idx].flags;
285
286         return ((!!(flags & VIRTQ_DESC_F_AVAIL) == avail_wrap_counter) &&
287                 (!!(flags & VIRTQ_DESC_F_USED) != avail_wrap_counter));
288 }
289
/* Drain and execute all available requests on one packed-ring queue.
 *
 * For each available chain: translate the request header, then either the
 * lone status descriptor (no payload) or the payload chain, record the
 * chain in the inflight table, execute the block command, and publish the
 * completion.  Queue indices/wrap counters are kept in locals and written
 * back to blk_vq after each request.  One task structure is reused for
 * every request and freed on exit (including early error returns).
 */
static void
process_requestq_packed(struct vhost_blk_ctrlr *ctrlr, uint32_t q_idx)
{
	bool avail_wrap_counter, used_wrap_counter;
	uint16_t avail_idx, used_idx;
	int ret;
	uint64_t chunck_len;
	struct vhost_blk_queue *blk_vq;
	struct rte_vhost_vring *vq;
	struct vhost_blk_task *task;

	blk_vq = &ctrlr->bdev->queues[q_idx];
	vq = &blk_vq->vq;

	/* Snapshot ring state; written back per-request below. */
	avail_idx = blk_vq->last_avail_idx;
	avail_wrap_counter = blk_vq->avail_wrap_counter;
	used_idx = blk_vq->last_used_idx;
	used_wrap_counter = blk_vq->used_wrap_counter;

	task = rte_zmalloc(NULL, sizeof(*task), 0);
	assert(task != NULL);
	task->vq = vq;
	task->bdev = ctrlr->bdev;

	while (descriptor_is_available(vq, avail_idx, avail_wrap_counter)) {
		/* Reset per-request fields (task is reused). */
		task->head_idx = avail_idx;
		task->desc_packed = &task->vq->desc_packed[task->head_idx];
		task->iovs_cnt = 0;
		task->data_len = 0;
		task->req = NULL;
		task->status = NULL;

		/* does not support indirect descriptors */
		assert((task->desc_packed->flags & VRING_DESC_F_INDIRECT) == 0);

		/* First descriptor: the virtio-blk request header. */
		chunck_len = task->desc_packed->len;
		task->req = (void *)(uintptr_t)gpa_to_vva(task->bdev->vid,
			task->desc_packed->addr, &chunck_len);
		if (!task->req || chunck_len != task->desc_packed->len) {
			fprintf(stderr, "failed to translate desc address.\n");
			rte_free(task);
			return;
		}

		task->desc_packed = descriptor_get_next_packed(task->vq,
								&avail_idx);
		assert(task->desc_packed != NULL);
		if (!descriptor_has_next_packed(task->desc_packed)) {
			/* Two-descriptor chain: header + status, no data. */
			task->dxfer_dir = BLK_DIR_NONE;
			task->last_idx = avail_idx % vq->size;
			chunck_len = task->desc_packed->len;
			task->status = (void *)(uintptr_t)
				              gpa_to_vva(task->bdev->vid,
							task->desc_packed->addr,
							&chunck_len);
			if (!task->status ||
				chunck_len != task->desc_packed->len) {
				fprintf(stderr,
					"failed to translate desc address.\n");
				rte_free(task);
				return;
			}
		} else {
			/* Payload present: WRITE flag on the first payload
			 * descriptor distinguishes guest reads from writes.
			 */
			task->readtype = descriptor_is_wr_packed(
							task->desc_packed);
			vhost_process_payload_chain_packed(task, &avail_idx);
		}
		task->buffer_id = vq->desc_packed[task->last_idx].id;
		/* Record the chain before executing so it can be resubmitted
		 * after a crash/restart.
		 */
		rte_vhost_set_inflight_desc_packed(ctrlr->bdev->vid, q_idx,
						   task->head_idx,
						   task->last_idx,
						   &task->inflight_idx);

		if (++avail_idx >= vq->size) {
			avail_idx -= vq->size;
			avail_wrap_counter = !avail_wrap_counter;
		}
		blk_vq->last_avail_idx = avail_idx;
		blk_vq->avail_wrap_counter = avail_wrap_counter;

		ret = vhost_bdev_process_blk_commands(ctrlr->bdev, task);
		if (ret) {
			/* invalid response */
			*task->status = VIRTIO_BLK_S_IOERR;
		} else {
			/* successfully */
			*task->status = VIRTIO_BLK_S_OK;
		}

		submit_completion_packed(task, q_idx, &used_idx,
						&used_wrap_counter);
		blk_vq->last_used_idx = used_idx;
		blk_vq->used_wrap_counter = used_wrap_counter;
	}

	rte_free(task);
}
387
/* Resubmit requests that were inflight (recorded but not completed) when
 * the backend restarted, packed-ring variant.
 *
 * Iterates the resubmit list newest-first (resubmit_num counts down),
 * reconstructs each request from the inflight descriptor table, executes
 * it and publishes its completion.  Also advances the avail index, since
 * on reconnect it starts equal to the used index.
 */
static void
submit_inflight_vq_packed(struct vhost_blk_ctrlr *ctrlr,
	uint16_t q_idx)
{
	bool used_wrap_counter;
	int req_idx, ret;
	uint16_t used_idx;
	uint64_t chunck_len;
	struct vhost_blk_queue *blk_vq;
	struct rte_vhost_ring_inflight *inflight_vq;
	struct rte_vhost_resubmit_info *resubmit_info;
	struct rte_vhost_vring *vq;
	struct inflight_blk_task *task;
	struct vhost_blk_task *blk_task;
	struct rte_vhost_inflight_info_packed *inflight_info;

	blk_vq = &ctrlr->bdev->queues[q_idx];
	vq = &blk_vq->vq;
	inflight_vq = &blk_vq->inflight_vq;
	resubmit_info = inflight_vq->resubmit_inflight;
	inflight_info = inflight_vq->inflight_packed;
	used_idx = blk_vq->last_used_idx;
	used_wrap_counter = blk_vq->used_wrap_counter;

	/* rte_malloc: task is not zeroed; every field read below is
	 * assigned first in the loop.
	 */
	task = rte_malloc(NULL, sizeof(*task), 0);
	if (!task) {
		fprintf(stderr, "failed to allocate memory\n");
		return;
	}
	blk_task = &task->blk_task;
	blk_task->vq = vq;
	blk_task->bdev = ctrlr->bdev;
	task->inflight_packed = inflight_vq->inflight_packed;

	while (resubmit_info->resubmit_num-- > 0) {
		req_idx = resubmit_info->resubmit_num;
		blk_task->head_idx =
			resubmit_info->resubmit_list[req_idx].index;
		task->inflight_desc =
			&inflight_info->desc[blk_task->head_idx];
		task->blk_task.iovs_cnt = 0;
		task->blk_task.data_len = 0;
		task->blk_task.req = NULL;
		task->blk_task.status = NULL;

		/* update the avail idx too
		 * as it's initial value equals to used idx
		 */
		blk_vq->last_avail_idx += task->inflight_desc->num;
		if (blk_vq->last_avail_idx >= vq->size) {
			blk_vq->last_avail_idx -= vq->size;
			blk_vq->avail_wrap_counter =
				!blk_vq->avail_wrap_counter;
		}

		/* does not support indirect descriptors */
		assert(task->inflight_desc != NULL);
		assert((task->inflight_desc->flags &
			VRING_DESC_F_INDIRECT) == 0);

		/* First descriptor: the virtio-blk request header. */
		chunck_len = task->inflight_desc->len;
		blk_task->req = (void *)(uintptr_t)
				     gpa_to_vva(blk_task->bdev->vid,
						task->inflight_desc->addr,
						&chunck_len);
		if (!blk_task->req ||
			chunck_len != task->inflight_desc->len) {
			fprintf(stderr, "failed to translate desc address.\n");
			rte_free(task);
			return;
		}

		task->inflight_desc = inflight_desc_get_next(
			task->inflight_packed, task->inflight_desc);
		assert(task->inflight_desc != NULL);
		if (!inflight_desc_has_next(task->inflight_desc)) {
			/* Header + status only: no data transfer. */
			blk_task->dxfer_dir = BLK_DIR_NONE;
			chunck_len = task->inflight_desc->len;
			blk_task->status = (void *)(uintptr_t)
				gpa_to_vva(blk_task->bdev->vid,
						task->inflight_desc->addr,
						&chunck_len);
			if (!blk_task->status ||
			    chunck_len != task->inflight_desc->len) {
				fprintf(stderr,
					"failed to translate desc address.\n");
				rte_free(task);
				return;
			}
		} else {
			blk_task->readtype =
			inflight_desc_is_wr(task->inflight_desc);
			inflight_process_payload_chain_packed(task);
		}

		blk_task->buffer_id = task->inflight_desc->id;

		ret = vhost_bdev_process_blk_commands(ctrlr->bdev, blk_task);
		if (ret)
			/* invalid response */
			*blk_task->status = VIRTIO_BLK_S_IOERR;
		else
			/* successfully */
			*blk_task->status = VIRTIO_BLK_S_OK;

		inflight_submit_completion_packed(task, q_idx, &used_idx,
						  &used_wrap_counter);

		blk_vq->last_used_idx = used_idx;
		blk_vq->used_wrap_counter = used_wrap_counter;
	}

	rte_free(task);
}
502
/* Follow the 'next' link of a split-ring descriptor.
 *
 * Unlike the packed variant this cannot fail: the caller must have
 * checked descriptor_has_next_split() first.
 */
static struct vring_desc *
descriptor_get_next_split(struct vring_desc *vq_desc,
				   struct vring_desc *cur_desc)
{
	uint16_t next = cur_desc->next;

	return vq_desc + next;
}
509
/* Return true when the split descriptor continues a chain (NEXT flag set). */
static bool
descriptor_has_next_split(struct vring_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_NEXT) != 0;
}
515
/* Return true when the split descriptor is device-writable (WRITE flag). */
static bool
descriptor_is_wr_split(struct vring_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_WRITE) != 0;
}
521
/* Build the iovec list for a split-ring request.
 *
 * Walks the chain from task->desc_split, translating each payload
 * descriptor into task->iovs; on exit task->desc_split is the final
 * (status) descriptor and task->status its translated address.  On a
 * translation failure an error is printed and the task is left only
 * partially populated.
 */
static void
vhost_process_payload_chain_split(struct vhost_blk_task *task)
{
	void *data;
	uint64_t chunck_len;

	task->iovs_cnt = 0;

	do {
		chunck_len = task->desc_split->len;
		data = (void *)(uintptr_t)gpa_to_vva(task->bdev->vid,
						     task->desc_split->addr,
						     &chunck_len);
		/* Shortened mapping == buffer crosses a region boundary. */
		if (!data || chunck_len != task->desc_split->len) {
			fprintf(stderr, "failed to translate desc address.\n");
			return;
		}

		task->iovs[task->iovs_cnt].iov_base = data;
		task->iovs[task->iovs_cnt].iov_len = task->desc_split->len;
		task->data_len += task->desc_split->len;
		task->iovs_cnt++;
		task->desc_split =
		descriptor_get_next_split(task->vq->desc, task->desc_split);
	} while (descriptor_has_next_split(task->desc_split));

	/* Chain tail: the status byte written back to the guest. */
	chunck_len = task->desc_split->len;
	task->status = (void *)(uintptr_t)gpa_to_vva(task->bdev->vid,
						     task->desc_split->addr,
						     &chunck_len);
	if (!task->status || chunck_len != task->desc_split->len)
		fprintf(stderr, "failed to translate desc address.\n");
}
555
/* Complete a request on a split ring: append a used-ring entry, update
 * inflight bookkeeping and kick the guest.
 */
static void
submit_completion_split(struct vhost_blk_task *task, uint32_t vid,
	uint32_t q_idx)
{
	struct rte_vhost_vring *vq;
	struct vring_used *used;

	vq = task->vq;
	used = vq->used;

	rte_vhost_set_last_inflight_io_split(vid, q_idx, task->req_idx);

	/* Fill out the next entry in the "used" ring.  id = the
	 * index of the descriptor that contained the blk request.
	 * len = the total amount of data transferred for the blk
	 * request. We must report the correct len, for variable
	 * length blk CDBs, where we may return less data than
	 * allocated by the guest VM.
	 */
	/* NOTE(review): the `& (vq->size - 1)` masking assumes the ring
	 * size is a power of two — a virtio split-ring requirement.
	 */
	used->ring[used->idx & (vq->size - 1)].id = task->req_idx;
	used->ring[used->idx & (vq->size - 1)].len = task->data_len;
	/* Entry must be visible before the idx bump the guest polls on. */
	rte_smp_mb();
	used->idx++;
	rte_smp_mb();

	rte_vhost_clr_inflight_desc_split(vid, q_idx, used->idx, task->req_idx);

	/* Send an interrupt back to the guest VM so that it knows
	 * a completion is ready to be processed.
	 */
	rte_vhost_vring_call(task->bdev->vid, q_idx);
}
588
/* Resubmit requests left inflight across a backend restart, split-ring
 * variant.  Iterates the resubmit list newest-first (resubmit_num counts
 * down), rebuilds each request from its descriptor chain, executes it
 * and publishes the completion.
 */
static void
submit_inflight_vq_split(struct vhost_blk_ctrlr *ctrlr,
	uint32_t q_idx)
{
	struct vhost_blk_queue *blk_vq;
	struct rte_vhost_ring_inflight *inflight_vq;
	struct rte_vhost_resubmit_info *resubmit_inflight;
	struct rte_vhost_resubmit_desc *resubmit_list;
	struct vhost_blk_task *task;
	int req_idx;
	uint64_t chunck_len;
	int ret;

	blk_vq = &ctrlr->bdev->queues[q_idx];
	inflight_vq = &blk_vq->inflight_vq;
	resubmit_inflight = inflight_vq->resubmit_inflight;
	resubmit_list = resubmit_inflight->resubmit_list;

	task = rte_zmalloc(NULL, sizeof(*task), 0);
	assert(task != NULL);

	task->ctrlr = ctrlr;
	task->bdev = ctrlr->bdev;
	task->vq = &blk_vq->vq;

	while (resubmit_inflight->resubmit_num-- > 0) {
		/* Reset per-request fields (task is reused). */
		req_idx = resubmit_list[resubmit_inflight->resubmit_num].index;
		task->req_idx = req_idx;
		task->desc_split = &task->vq->desc[task->req_idx];
		task->iovs_cnt = 0;
		task->data_len = 0;
		task->req = NULL;
		task->status = NULL;

		/* does not support indirect descriptors */
		assert(task->desc_split != NULL);
		assert((task->desc_split->flags & VRING_DESC_F_INDIRECT) == 0);

		/* First descriptor: the virtio-blk request header. */
		chunck_len = task->desc_split->len;
		task->req = (void *)(uintptr_t)gpa_to_vva(task->bdev->vid,
				task->desc_split->addr, &chunck_len);
		if (!task->req || chunck_len != task->desc_split->len) {
			fprintf(stderr, "failed to translate desc address.\n");
			rte_free(task);
			return;
		}

		task->desc_split = descriptor_get_next_split(task->vq->desc,
							     task->desc_split);
		if (!descriptor_has_next_split(task->desc_split)) {
			/* Header + status only: no data transfer. */
			task->dxfer_dir = BLK_DIR_NONE;
			chunck_len = task->desc_split->len;
			task->status = (void *)(uintptr_t)
				       gpa_to_vva(task->bdev->vid,
						  task->desc_split->addr,
						  &chunck_len);
			if (!task->status ||
				chunck_len != task->desc_split->len) {
				fprintf(stderr,
					"failed to translate desc address.\n");
				rte_free(task);
				return;
			}
		} else {
			task->readtype =
				descriptor_is_wr_split(task->desc_split);
			vhost_process_payload_chain_split(task);
		}

		ret = vhost_bdev_process_blk_commands(ctrlr->bdev, task);
		if (ret) {
			/* invalid response */
			*task->status = VIRTIO_BLK_S_IOERR;
		} else {
			/* successfully */
			*task->status = VIRTIO_BLK_S_OK;
		}
		submit_completion_split(task, ctrlr->bdev->vid, q_idx);
	}

	rte_free(task);
}
671
/* Drain and execute all available requests on one split-ring queue.
 *
 * Consumes avail-ring entries from last_avail_idx up to the guest's
 * current avail->idx; for each one translates the request header, then
 * either the lone status descriptor or the payload chain, records the
 * request in the inflight log, executes it and publishes the completion.
 * One task structure is reused for every request and freed on exit
 * (including early error returns).
 */
static void
process_requestq_split(struct vhost_blk_ctrlr *ctrlr, uint32_t q_idx)
{
	int ret;
	int req_idx;
	uint16_t last_idx;
	uint64_t chunck_len;
	struct vhost_blk_queue *blk_vq;
	struct rte_vhost_vring *vq;
	struct vhost_blk_task *task;

	blk_vq = &ctrlr->bdev->queues[q_idx];
	vq = &blk_vq->vq;

	task = rte_zmalloc(NULL, sizeof(*task), 0);
	assert(task != NULL);
	task->ctrlr = ctrlr;
	task->bdev = ctrlr->bdev;
	task->vq = vq;

	while (vq->avail->idx != blk_vq->last_avail_idx) {
		/* Mask assumes a power-of-two ring size (virtio rule). */
		last_idx = blk_vq->last_avail_idx & (vq->size - 1);
		req_idx = vq->avail->ring[last_idx];
		task->req_idx = req_idx;
		task->desc_split = &task->vq->desc[task->req_idx];
		task->iovs_cnt = 0;
		task->data_len = 0;
		task->req = NULL;
		task->status = NULL;

		/* Log the request before executing so it can be resubmitted
		 * after a crash/restart.
		 */
		rte_vhost_set_inflight_desc_split(ctrlr->bdev->vid, q_idx,
							task->req_idx);

		/* does not support indirect descriptors */
		assert((task->desc_split->flags & VRING_DESC_F_INDIRECT) == 0);

		/* First descriptor: the virtio-blk request header. */
		chunck_len = task->desc_split->len;
		task->req = (void *)(uintptr_t)gpa_to_vva(task->bdev->vid,
				task->desc_split->addr, &chunck_len);
		if (!task->req || chunck_len != task->desc_split->len) {
			fprintf(stderr, "failed to translate desc address.\n");
			rte_free(task);
			return;
		}

		task->desc_split = descriptor_get_next_split(task->vq->desc,
							     task->desc_split);
		if (!descriptor_has_next_split(task->desc_split)) {
			/* Header + status only: no data transfer. */
			task->dxfer_dir = BLK_DIR_NONE;
			chunck_len = task->desc_split->len;
			task->status = (void *)(uintptr_t)
				              gpa_to_vva(task->bdev->vid,
							 task->desc_split->addr,
							 &chunck_len);
			if (!task->status ||
				chunck_len != task->desc_split->len) {
				fprintf(stderr,
					"failed to translate desc address.\n");
				rte_free(task);
				return;
			}
		} else {
			task->readtype =
				descriptor_is_wr_split(task->desc_split);
			vhost_process_payload_chain_split(task);
		}
		blk_vq->last_avail_idx++;

		ret = vhost_bdev_process_blk_commands(ctrlr->bdev, task);
		if (ret) {
			/* invalid response */
			*task->status = VIRTIO_BLK_S_IOERR;
		} else {
			/* successfully */
			*task->status = VIRTIO_BLK_S_OK;
		}

		submit_completion_split(task, ctrlr->bdev->vid, q_idx);
	}

	rte_free(task);
}
754
/* Controller worker thread entry point.
 *
 * Pins itself to CPU 0, first resubmits any requests left inflight from a
 * previous backend instance, then busy-polls every queue until
 * g_should_stop is set or the bdev disappears.  Signals exit_sem on exit.
 *
 * @param arg  struct vhost_blk_ctrlr * for the device to serve.
 * @return always NULL.
 */
static void *
ctrlr_worker(void *arg)
{
	struct vhost_blk_ctrlr *ctrlr = (struct vhost_blk_ctrlr *)arg;
	struct vhost_blk_queue *blk_vq;
	struct rte_vhost_ring_inflight *inflight_vq;
	cpu_set_t cpuset;
	pthread_t thread;
	int i;

	fprintf(stdout, "Ctrlr Worker Thread start\n");

	if (ctrlr == NULL || ctrlr->bdev == NULL) {
		fprintf(stderr,
			"%s: Error, invalid argument passed to worker thread\n",
			__func__);
		exit(0);
	}

	/* Pin the polling thread to CPU 0. */
	thread = pthread_self();
	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);
	pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);

	/* Replay requests that were inflight when the backend restarted. */
	for (i = 0; i < NUM_OF_BLK_QUEUES; i++) {
		blk_vq = &ctrlr->bdev->queues[i];
		inflight_vq = &blk_vq->inflight_vq;
		if (inflight_vq->resubmit_inflight != NULL &&
		    inflight_vq->resubmit_inflight->resubmit_num != 0) {
			if (ctrlr->packed_ring)
				submit_inflight_vq_packed(ctrlr, i);
			else
				submit_inflight_vq_split(ctrlr, i);
		}
	}

	/* Busy-poll all queues until asked to stop.  NOTE(review):
	 * g_should_stop is a plain int read here and written elsewhere —
	 * presumably set from another thread; confirm visibility is OK.
	 */
	while (!g_should_stop && ctrlr->bdev != NULL) {
		for (i = 0; i < NUM_OF_BLK_QUEUES; i++) {
			if (ctrlr->packed_ring)
				process_requestq_packed(ctrlr, i);
			else
				process_requestq_split(ctrlr, i);
		}
	}

	/* Mark worker as fully exited and wake the waiter. */
	g_should_stop = 2;
	fprintf(stdout, "Ctrlr Worker Thread Exiting\n");
	sem_post(&exit_sem);
	return NULL;
}
805
/*
 * vhost library callback: the guest has negotiated features and the
 * device is ready.  Snapshots the negotiated features and guest memory
 * table, restores per-queue ring indexes (including inflight state for
 * reconnection), disables guest notifications and spawns the polling
 * worker thread.
 *
 * Returns 0 on success (or if the controller is already started),
 * -1 on failure.
 */
static int
new_device(int vid)
{
	struct vhost_blk_ctrlr *ctrlr;
	struct vhost_blk_queue *blk_vq;
	struct rte_vhost_vring *vq;
	uint64_t features;
	pthread_t tid;
	int i, ret;

	ctrlr = vhost_blk_ctrlr_find(dev_pathname);
	if (!ctrlr) {
		fprintf(stderr, "Controller is not ready\n");
		return -1;
	}

	/* Idempotent: a second start request is a no-op. */
	if (ctrlr->started)
		return 0;

	ctrlr->bdev->vid = vid;
	ret = rte_vhost_get_negotiated_features(vid, &features);
	if (ret) {
		fprintf(stderr, "failed to get the negotiated features\n");
		return -1;
	}
	ctrlr->packed_ring = !!(features & (1ULL << VIRTIO_F_RING_PACKED));

	ret = rte_vhost_get_mem_table(vid, &ctrlr->mem);
	if (ret)
		fprintf(stderr, "Get Controller memory region failed\n");
	assert(ctrlr->mem != NULL);

	/* Disable Notifications and init last idx */
	for (i = 0; i < NUM_OF_BLK_QUEUES; i++) {
		blk_vq = &ctrlr->bdev->queues[i];
		vq = &blk_vq->vq;

		ret = rte_vhost_get_vhost_vring(ctrlr->bdev->vid, i, vq);
		assert(ret == 0);

		ret = rte_vhost_get_vring_base(ctrlr->bdev->vid, i,
					       &blk_vq->last_avail_idx,
					       &blk_vq->last_used_idx);
		assert(ret == 0);

		ret = rte_vhost_get_vhost_ring_inflight(ctrlr->bdev->vid, i,
							&blk_vq->inflight_vq);
		assert(ret == 0);

		if (ctrlr->packed_ring) {
			/* for the reconnection */
			ret = rte_vhost_get_vring_base_from_inflight(
				ctrlr->bdev->vid, i,
				&blk_vq->last_avail_idx,
				&blk_vq->last_used_idx);
			assert(ret == 0);

			/* The saved 16-bit index packs the wrap counter
			 * into bit 15; split it from the 15-bit ring
			 * index (destroy_device() re-packs them).
			 */
			blk_vq->avail_wrap_counter = blk_vq->last_avail_idx &
				(1 << 15);
			blk_vq->last_avail_idx = blk_vq->last_avail_idx &
				0x7fff;
			blk_vq->used_wrap_counter = blk_vq->last_used_idx &
				(1 << 15);
			blk_vq->last_used_idx = blk_vq->last_used_idx &
				0x7fff;
		}

		/* Polling mode: guest kicks are not needed. */
		rte_vhost_enable_guest_notification(vid, i, 0);
	}

	/* start polling vring */
	g_should_stop = 0;
	fprintf(stdout, "New Device %s, Device ID %d\n", dev_pathname, vid);
	if (pthread_create(&tid, NULL, &ctrlr_worker, ctrlr) < 0) {
		fprintf(stderr, "Worker Thread Started Failed\n");
		return -1;
	}

	/* device has been started */
	ctrlr->started = 1;
	pthread_detach(tid);
	return 0;
}
889
/*
 * vhost library callback: the guest connection is going away.  Stops
 * the polling worker (g_should_stop handshake), saves each queue's
 * ring indexes back into the vhost library for reconnection, and
 * releases the memory-table snapshot.
 *
 * NOTE(review): the wait loops on g_should_stop are busy-spins on a
 * non-atomic flag — see the note on ctrlr_worker().
 */
static void
destroy_device(int vid)
{
	char path[PATH_MAX];
	struct vhost_blk_ctrlr *ctrlr;
	struct vhost_blk_queue *blk_vq;
	int i, ret;

	ret = rte_vhost_get_ifname(vid, path, PATH_MAX);
	if (ret) {
		fprintf(stderr, "Destroy Ctrlr Failed\n");
		return;
	}

	fprintf(stdout, "Destroy %s Device ID %d\n", path, vid);
	ctrlr = vhost_blk_ctrlr_find(path);
	if (!ctrlr) {
		fprintf(stderr, "Destroy Ctrlr Failed\n");
		return;
	}

	if (!ctrlr->started)
		return;

	/* Ask the worker to stop and spin until it acknowledges (2). */
	g_should_stop = 1;
	while (g_should_stop != 2)
		;

	for (i = 0; i < NUM_OF_BLK_QUEUES; i++) {
		blk_vq = &ctrlr->bdev->queues[i];
		if (ctrlr->packed_ring) {
			/* Re-pack the wrap counter into bit 15 of the
			 * index, undoing the split done in new_device().
			 */
			blk_vq->last_avail_idx |= (blk_vq->avail_wrap_counter <<
				15);
			blk_vq->last_used_idx |= (blk_vq->used_wrap_counter <<
				15);
		}
		rte_vhost_set_vring_base(ctrlr->bdev->vid, i,
					 blk_vq->last_avail_idx,
					 blk_vq->last_used_idx);
	}

	/* mem was filled in by rte_vhost_get_mem_table(); plain free()
	 * matches that API's allocation — TODO confirm against the
	 * rte_vhost documentation.
	 */
	free(ctrlr->mem);

	ctrlr->started = 0;
	/* Consume the worker's exit post so counts stay balanced. */
	sem_wait(&exit_sem);
}
936
/* vhost library callback: a new vhost-user socket connection arrived. */
static int
new_connection(int vid)
{
	/* Install the rte-compat hooks that extend the block-device
	 * feature handling for this session.
	 */
	vhost_session_install_rte_compat_hooks(vid);
	return 0;
}
945
/* Lifecycle callbacks registered with the vhost-user library
 * (see rte_vhost_driver_callback_register() in ctrlr_construct).
 */
struct vhost_device_ops vhost_blk_device_ops = {
	.new_device =  new_device,
	.destroy_device = destroy_device,
	.new_connection = new_connection,
};
951
952 static struct vhost_block_dev *
953 vhost_blk_bdev_construct(const char *bdev_name,
954         const char *bdev_serial, uint32_t blk_size, uint64_t blk_cnt,
955         bool wce_enable)
956 {
957         struct vhost_block_dev *bdev;
958
959         bdev = rte_zmalloc(NULL, sizeof(*bdev), RTE_CACHE_LINE_SIZE);
960         if (!bdev)
961                 return NULL;
962
963         strncpy(bdev->name, bdev_name, sizeof(bdev->name));
964         strncpy(bdev->product_name, bdev_serial, sizeof(bdev->product_name));
965         bdev->blocklen = blk_size;
966         bdev->blockcnt = blk_cnt;
967         bdev->write_cache = wce_enable;
968
969         fprintf(stdout, "blocklen=%d, blockcnt=%"PRIx64"\n", bdev->blocklen,
970                 bdev->blockcnt);
971
972         /* use memory as disk storage space */
973         bdev->data = rte_zmalloc(NULL, blk_cnt * blk_size, 0);
974         if (!bdev->data) {
975                 fprintf(stderr, "no enough reserved huge memory for disk\n");
976                 free(bdev);
977                 return NULL;
978         }
979
980         return bdev;
981 }
982
983 static struct vhost_blk_ctrlr *
984 vhost_blk_ctrlr_construct(const char *ctrlr_name)
985 {
986         int ret;
987         struct vhost_blk_ctrlr *ctrlr;
988         char *path;
989         char cwd[PATH_MAX];
990
991         /* always use current directory */
992         path = getcwd(cwd, PATH_MAX);
993         if (!path) {
994                 fprintf(stderr, "Cannot get current working directory\n");
995                 return NULL;
996         }
997         snprintf(dev_pathname, sizeof(dev_pathname), "%s/%s", path, ctrlr_name);
998
999         unlink(dev_pathname);
1000
1001         if (rte_vhost_driver_register(dev_pathname, 0) != 0) {
1002                 fprintf(stderr, "socket %s already exists\n", dev_pathname);
1003                 return NULL;
1004         }
1005
1006         ret = rte_vhost_driver_set_features(dev_pathname, VHOST_BLK_FEATURES);
1007         if (ret != 0) {
1008                 fprintf(stderr, "Set vhost driver features failed\n");
1009                 rte_vhost_driver_unregister(dev_pathname);
1010                 return NULL;
1011         }
1012
1013         /* set proper features */
1014         vhost_dev_install_rte_compat_hooks(dev_pathname);
1015
1016         ctrlr = rte_zmalloc(NULL, sizeof(*ctrlr), RTE_CACHE_LINE_SIZE);
1017         if (!ctrlr) {
1018                 rte_vhost_driver_unregister(dev_pathname);
1019                 return NULL;
1020         }
1021
1022         /* hardcoded block device information with 128MiB */
1023         ctrlr->bdev = vhost_blk_bdev_construct("malloc0", "vhost_blk_malloc0",
1024                                                 4096, 32768, 0);
1025         if (!ctrlr->bdev) {
1026                 rte_free(ctrlr);
1027                 rte_vhost_driver_unregister(dev_pathname);
1028                 return NULL;
1029         }
1030
1031         rte_vhost_driver_callback_register(dev_pathname,
1032                                            &vhost_blk_device_ops);
1033
1034         return ctrlr;
1035 }
1036
/*
 * SIGINT handler: remove the socket, stop the worker thread if one is
 * running (g_should_stop handshake, see ctrlr_worker), free the
 * controller and its bdev, unregister the driver and exit.
 *
 * NOTE(review): this calls non-async-signal-safe functions (rte_free,
 * rte_vhost_driver_unregister, exit) from signal context — tolerable
 * for an example, not for production code.
 */
static void
signal_handler(__rte_unused int signum)
{
	struct vhost_blk_ctrlr *ctrlr;

	unlink(dev_pathname);

	/* g_should_stop == -1 means no worker was ever started. */
	if (g_should_stop != -1) {
		g_should_stop = 1;
		while (g_should_stop != 2)
			;
	}

	ctrlr = vhost_blk_ctrlr_find(dev_pathname);
	if (ctrlr != NULL) {
		if (ctrlr->bdev != NULL) {
			rte_free(ctrlr->bdev->data);
			rte_free(ctrlr->bdev);
		}
		rte_free(ctrlr);
	}

	rte_vhost_driver_unregister(dev_pathname);
	exit(0);
}
1062
1063 int main(int argc, char *argv[])
1064 {
1065         int ret;
1066
1067         signal(SIGINT, signal_handler);
1068
1069         /* init EAL */
1070         ret = rte_eal_init(argc, argv);
1071         if (ret < 0)
1072                 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
1073
1074         g_vhost_ctrlr = vhost_blk_ctrlr_construct("vhost.socket");
1075         if (g_vhost_ctrlr == NULL) {
1076                 fprintf(stderr, "Construct vhost blk controller failed\n");
1077                 return 0;
1078         }
1079
1080         if (sem_init(&exit_sem, 0, 0) < 0) {
1081                 fprintf(stderr, "Error init exit_sem\n");
1082                 return -1;
1083         }
1084
1085         rte_vhost_driver_start(dev_pathname);
1086
1087         /* loop for exit the application */
1088         while (1)
1089                 sleep(1);
1090
1091         return 0;
1092 }