drivers: remove direct access to interrupt handle
drivers/vdpa/ifc/ifcvf_vdpa.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4
5 #include <unistd.h>
6 #include <pthread.h>
7 #include <fcntl.h>
8 #include <string.h>
9 #include <sys/ioctl.h>
10 #include <sys/epoll.h>
11 #include <linux/virtio_net.h>
12 #include <stdbool.h>
13
14 #include <rte_eal_paging.h>
15 #include <rte_malloc.h>
16 #include <rte_memory.h>
17 #include <rte_bus_pci.h>
18 #include <rte_vhost.h>
19 #include <rte_vdpa.h>
20 #include <rte_vdpa_dev.h>
21 #include <rte_vfio.h>
22 #include <rte_spinlock.h>
23 #include <rte_log.h>
24 #include <rte_kvargs.h>
25 #include <rte_devargs.h>
26
27 #include "base/ifcvf.h"
28
29 RTE_LOG_REGISTER(ifcvf_vdpa_logtype, pmd.vdpa.ifcvf, NOTICE);
30 #define DRV_LOG(level, fmt, args...) \
31         rte_log(RTE_LOG_ ## level, ifcvf_vdpa_logtype, \
32                 "IFCVF %s(): " fmt "\n", __func__, ##args)
33
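/* Split-ring used ring length: one vring_used_elem per descriptor plus three
 * uint16_t fields (flags, idx and the trailing avail event).
 */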
34 #define IFCVF_USED_RING_LEN(size) \
35         ((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
36
37 #define IFCVF_VDPA_MODE         "vdpa"
38 #define IFCVF_SW_FALLBACK_LM    "sw-live-migration"
39
40 #define THREAD_NAME_LEN 16
41
42 static const char * const ifcvf_valid_arguments[] = {
43         IFCVF_VDPA_MODE,
44         IFCVF_SW_FALLBACK_LM,
45         NULL
46 };
47
48 struct ifcvf_internal {
49         struct rte_pci_device *pdev;
50         struct ifcvf_hw hw;
51         int configured;
52         int vfio_container_fd;
53         int vfio_group_fd;
54         int vfio_dev_fd;
55         pthread_t tid;  /* thread for notify relay */
56         int epfd;
57         int vid;
58         struct rte_vdpa_device *vdev;
59         uint16_t max_queues;
60         uint64_t features;
61         rte_atomic32_t started;
62         rte_atomic32_t dev_attached;
63         rte_atomic32_t running;
64         rte_spinlock_t lock;
65         bool sw_lm;
66         bool sw_fallback_running;
67         /* mediated vring for sw fallback */
68         struct vring m_vring[IFCVF_MAX_QUEUES * 2];
69         /* eventfd for used ring interrupt */
70         int intr_fd[IFCVF_MAX_QUEUES * 2];
71 };
72
73 struct internal_list {
74         TAILQ_ENTRY(internal_list) next;
75         struct ifcvf_internal *internal;
76 };
77
78 TAILQ_HEAD(internal_list_head, internal_list);
79 static struct internal_list_head internal_list =
80         TAILQ_HEAD_INITIALIZER(internal_list);
81
82 static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
83
84 static void update_used_ring(struct ifcvf_internal *internal, uint16_t qid);
85
86 static struct internal_list *
87 find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
88 {
89         int found = 0;
90         struct internal_list *list;
91
92         pthread_mutex_lock(&internal_list_lock);
93
94         TAILQ_FOREACH(list, &internal_list, next) {
95                 if (vdev == list->internal->vdev) {
96                         found = 1;
97                         break;
98                 }
99         }
100
101         pthread_mutex_unlock(&internal_list_lock);
102
103         if (!found)
104                 return NULL;
105
106         return list;
107 }
108
109 static struct internal_list *
110 find_internal_resource_by_dev(struct rte_pci_device *pdev)
111 {
112         int found = 0;
113         struct internal_list *list;
114
115         pthread_mutex_lock(&internal_list_lock);
116
117         TAILQ_FOREACH(list, &internal_list, next) {
118                 if (!rte_pci_addr_cmp(&pdev->addr,
119                                         &list->internal->pdev->addr)) {
120                         found = 1;
121                         break;
122                 }
123         }
124
125         pthread_mutex_unlock(&internal_list_lock);
126
127         if (!found)
128                 return NULL;
129
130         return list;
131 }
132
133 static int
134 ifcvf_vfio_setup(struct ifcvf_internal *internal)
135 {
136         struct rte_pci_device *dev = internal->pdev;
137         char devname[RTE_DEV_NAME_MAX_LEN] = {0};
138         int iommu_group_num;
139         int i, ret;
140
141         internal->vfio_dev_fd = -1;
142         internal->vfio_group_fd = -1;
143         internal->vfio_container_fd = -1;
144
145         rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
146         ret = rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
147                         &iommu_group_num);
148         if (ret <= 0) {
149                 DRV_LOG(ERR, "%s failed to get IOMMU group", devname);
150                 return -1;
151         }
152
153         internal->vfio_container_fd = rte_vfio_container_create();
154         if (internal->vfio_container_fd < 0)
155                 return -1;
156
157         internal->vfio_group_fd = rte_vfio_container_group_bind(
158                         internal->vfio_container_fd, iommu_group_num);
159         if (internal->vfio_group_fd < 0)
160                 goto err;
161
162         if (rte_pci_map_device(dev))
163                 goto err;
164
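        /* Per this patch the interrupt handle is treated as opaque: the VFIO
         * device fd is read through the rte_intr_dev_fd_get() accessor rather
         * than by dereferencing intr_handle fields directly.
         */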
165         internal->vfio_dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
166
167         for (i = 0; i < RTE_MIN(PCI_MAX_RESOURCE, IFCVF_PCI_MAX_RESOURCE);
168                         i++) {
169                 internal->hw.mem_resource[i].addr =
170                         internal->pdev->mem_resource[i].addr;
171                 internal->hw.mem_resource[i].phys_addr =
172                         internal->pdev->mem_resource[i].phys_addr;
173                 internal->hw.mem_resource[i].len =
174                         internal->pdev->mem_resource[i].len;
175         }
176
177         return 0;
178
179 err:
180         rte_vfio_container_destroy(internal->vfio_container_fd);
181         return -1;
182 }
183
184 static int
185 ifcvf_dma_map(struct ifcvf_internal *internal, bool do_map)
186 {
187         uint32_t i;
188         int ret;
189         struct rte_vhost_memory *mem = NULL;
190         int vfio_container_fd;
191
192         ret = rte_vhost_get_mem_table(internal->vid, &mem);
193         if (ret < 0) {
194                 DRV_LOG(ERR, "failed to get VM memory layout.");
195                 goto exit;
196         }
197
198         vfio_container_fd = internal->vfio_container_fd;
199
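        /* Map (or unmap) every guest memory region into the VFIO container,
         * using the guest physical address as IOVA so the VF can DMA straight
         * from the addresses found in the descriptors.
         */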
200         for (i = 0; i < mem->nregions; i++) {
201                 struct rte_vhost_mem_region *reg;
202
203                 reg = &mem->regions[i];
204                 DRV_LOG(INFO, "%s, region %u: HVA 0x%" PRIx64 ", "
205                         "GPA 0x%" PRIx64 ", size 0x%" PRIx64 ".",
206                         do_map ? "DMA map" : "DMA unmap", i,
207                         reg->host_user_addr, reg->guest_phys_addr, reg->size);
208
209                 if (do_map) {
210                         ret = rte_vfio_container_dma_map(vfio_container_fd,
211                                 reg->host_user_addr, reg->guest_phys_addr,
212                                 reg->size);
213                         if (ret < 0) {
214                                 DRV_LOG(ERR, "DMA map failed.");
215                                 goto exit;
216                         }
217                 } else {
218                         ret = rte_vfio_container_dma_unmap(vfio_container_fd,
219                                 reg->host_user_addr, reg->guest_phys_addr,
220                                 reg->size);
221                         if (ret < 0) {
222                                 DRV_LOG(ERR, "DMA unmap failed.");
223                                 goto exit;
224                         }
225                 }
226         }
227
228 exit:
229         if (mem)
230                 free(mem);
231         return ret;
232 }
233
234 static uint64_t
235 hva_to_gpa(int vid, uint64_t hva)
236 {
237         struct rte_vhost_memory *mem = NULL;
238         struct rte_vhost_mem_region *reg;
239         uint32_t i;
240         uint64_t gpa = 0;
241
242         if (rte_vhost_get_mem_table(vid, &mem) < 0)
243                 goto exit;
244
245         for (i = 0; i < mem->nregions; i++) {
246                 reg = &mem->regions[i];
247
248                 if (hva >= reg->host_user_addr &&
249                                 hva < reg->host_user_addr + reg->size) {
250                         gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
251                         break;
252                 }
253         }
254
255 exit:
256         if (mem)
257                 free(mem);
258         return gpa;
259 }
260
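/* Walk the vhost memory table to translate a host virtual address back to a
 * guest physical address; returns 0 when the address is not covered by any
 * region.
 */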
261 static int
262 vdpa_ifcvf_start(struct ifcvf_internal *internal)
263 {
264         struct ifcvf_hw *hw = &internal->hw;
265         int i, nr_vring;
266         int vid;
267         struct rte_vhost_vring vq;
268         uint64_t gpa;
269
270         vid = internal->vid;
271         nr_vring = rte_vhost_get_vring_num(vid);
272         rte_vhost_get_negotiated_features(vid, &hw->req_features);
273
274         for (i = 0; i < nr_vring; i++) {
275                 rte_vhost_get_vhost_vring(vid, i, &vq);
276                 gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
277                 if (gpa == 0) {
278                         DRV_LOG(ERR, "Failed to get GPA for descriptor ring.");
279                         return -1;
280                 }
281                 hw->vring[i].desc = gpa;
282
283                 gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
284                 if (gpa == 0) {
285                         DRV_LOG(ERR, "Failed to get GPA for available ring.");
286                         return -1;
287                 }
288                 hw->vring[i].avail = gpa;
289
290                 gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
291                 if (gpa == 0) {
292                         DRV_LOG(ERR, "Failed to get GPA for used ring.");
293                         return -1;
294                 }
295                 hw->vring[i].used = gpa;
296
297                 hw->vring[i].size = vq.size;
298                 rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx,
299                                 &hw->vring[i].last_used_idx);
300         }
301         hw->nr_vring = i;
302
303         return ifcvf_start_hw(&internal->hw);
304 }
305
306 static void
307 vdpa_ifcvf_stop(struct ifcvf_internal *internal)
308 {
309         struct ifcvf_hw *hw = &internal->hw;
310         uint32_t i;
311         int vid;
312         uint64_t features = 0;
313         uint64_t log_base = 0, log_size = 0;
314         uint64_t len;
315
316         vid = internal->vid;
317         ifcvf_stop_hw(hw);
318
319         for (i = 0; i < hw->nr_vring; i++)
320                 rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
321                                 hw->vring[i].last_used_idx);
322
323         if (internal->sw_lm)
324                 return;
325
326         rte_vhost_get_negotiated_features(vid, &features);
327         if (RTE_VHOST_NEED_LOG(features)) {
328                 ifcvf_disable_logging(hw);
329                 rte_vhost_get_log_base(internal->vid, &log_base, &log_size);
330                 rte_vfio_container_dma_unmap(internal->vfio_container_fd,
331                                 log_base, IFCVF_LOG_BASE, log_size);
332                 /*
333                  * IFCVF marks dirty memory pages for only packet buffer,
334                  * SW helps to mark the used ring as dirty after device stops.
335                  */
336                 for (i = 0; i < hw->nr_vring; i++) {
337                         len = IFCVF_USED_RING_LEN(hw->vring[i].size);
338                         rte_vhost_log_used_vring(vid, i, 0, len);
339                 }
340         }
341 }
342
343 #define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
344                 sizeof(int) * (IFCVF_MAX_QUEUES * 2 + 1))
345 static int
346 vdpa_enable_vfio_intr(struct ifcvf_internal *internal, bool m_rx)
347 {
348         int ret;
349         uint32_t i, nr_vring;
350         char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
351         struct vfio_irq_set *irq_set;
352         int *fd_ptr;
353         struct rte_vhost_vring vring;
354         int fd;
355
356         vring.callfd = -1;
357
358         nr_vring = rte_vhost_get_vring_num(internal->vid);
359
360         irq_set = (struct vfio_irq_set *)irq_set_buf;
361         irq_set->argsz = sizeof(irq_set_buf);
362         irq_set->count = nr_vring + 1;
363         irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
364                          VFIO_IRQ_SET_ACTION_TRIGGER;
365         irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
366         irq_set->start = 0;
367         fd_ptr = (int *)&irq_set->data;
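        /* Vector 0 carries the device's own interrupt and its eventfd is
         * obtained via the rte_intr_fd_get() accessor; vectors 1..nr_vring
         * are wired to the vrings' call fds (or relay eventfds in the
         * software fallback case).
         */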
368         fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] =
369                 rte_intr_fd_get(internal->pdev->intr_handle);
370
371         for (i = 0; i < nr_vring; i++)
372                 internal->intr_fd[i] = -1;
373
374         for (i = 0; i < nr_vring; i++) {
375                 rte_vhost_get_vhost_vring(internal->vid, i, &vring);
376                 fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
377                 if ((i & 1) == 0 && m_rx == true) {
378                         fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
379                         if (fd < 0) {
380                                 DRV_LOG(ERR, "can't setup eventfd: %s",
381                                         strerror(errno));
382                                 return -1;
383                         }
384                         internal->intr_fd[i] = fd;
385                         fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = fd;
386                 }
387         }
388
389         ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
390         if (ret) {
391                 DRV_LOG(ERR, "Error enabling MSI-X interrupts: %s",
392                                 strerror(errno));
393                 return -1;
394         }
395
396         return 0;
397 }
398
399 static int
400 vdpa_disable_vfio_intr(struct ifcvf_internal *internal)
401 {
402         int ret;
403         uint32_t i, nr_vring;
404         char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
405         struct vfio_irq_set *irq_set;
406
407         irq_set = (struct vfio_irq_set *)irq_set_buf;
408         irq_set->argsz = sizeof(irq_set_buf);
409         irq_set->count = 0;
410         irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
411         irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
412         irq_set->start = 0;
413
414         nr_vring = rte_vhost_get_vring_num(internal->vid);
415         for (i = 0; i < nr_vring; i++) {
416                 if (internal->intr_fd[i] >= 0)
417                         close(internal->intr_fd[i]);
418                 internal->intr_fd[i] = -1;
419         }
420
421         ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
422         if (ret) {
423                 DRV_LOG(ERR, "Error disabling MSI-X interrupts: %s",
424                                 strerror(errno));
425                 return -1;
426         }
427
428         return 0;
429 }
430
431 static void *
432 notify_relay(void *arg)
433 {
434         int i, kickfd, epfd, nfds = 0;
435         uint32_t qid, q_num;
436         struct epoll_event events[IFCVF_MAX_QUEUES * 2];
437         struct epoll_event ev;
438         uint64_t buf;
439         int nbytes;
440         struct rte_vhost_vring vring;
441         struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
442         struct ifcvf_hw *hw = &internal->hw;
443
444         q_num = rte_vhost_get_vring_num(internal->vid);
445
446         epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
447         if (epfd < 0) {
448                 DRV_LOG(ERR, "failed to create epoll instance.");
449                 return NULL;
450         }
451         internal->epfd = epfd;
452
453         vring.kickfd = -1;
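        /* Each epoll event packs the queue id in the low 32 bits and the
         * guest's kick fd in the high 32 bits, so both can be recovered
         * from data.u64 when the event fires.
         */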
454         for (qid = 0; qid < q_num; qid++) {
455                 ev.events = EPOLLIN | EPOLLPRI;
456                 rte_vhost_get_vhost_vring(internal->vid, qid, &vring);
457                 ev.data.u64 = qid | (uint64_t)vring.kickfd << 32;
458                 if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
459                         DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
460                         return NULL;
461                 }
462         }
463
464         for (;;) {
465                 nfds = epoll_wait(epfd, events, q_num, -1);
466                 if (nfds < 0) {
467                         if (errno == EINTR)
468                                 continue;
469                         DRV_LOG(ERR, "epoll_wait returned error");
470                         return NULL;
471                 }
472
473                 for (i = 0; i < nfds; i++) {
474                         qid = events[i].data.u32;
475                         kickfd = (uint32_t)(events[i].data.u64 >> 32);
476                         do {
477                                 nbytes = read(kickfd, &buf, 8);
478                                 if (nbytes < 0) {
479                                         if (errno == EINTR ||
480                                             errno == EWOULDBLOCK ||
481                                             errno == EAGAIN)
482                                                 continue;
483                                         DRV_LOG(INFO, "Error reading "
484                                                 "kickfd: %s",
485                                                 strerror(errno));
486                                 }
487                                 break;
488                         } while (1);
489
490                         ifcvf_notify_queue(hw, qid);
491                 }
492         }
493
494         return NULL;
495 }
496
497 static int
498 setup_notify_relay(struct ifcvf_internal *internal)
499 {
500         char name[THREAD_NAME_LEN];
501         int ret;
502
503         snprintf(name, sizeof(name), "ifc-notify-%d", internal->vid);
504         ret = rte_ctrl_thread_create(&internal->tid, name, NULL, notify_relay,
505                                      (void *)internal);
506         if (ret != 0) {
507                 DRV_LOG(ERR, "failed to create notify relay pthread.");
508                 return -1;
509         }
510
511         return 0;
512 }
513
514 static int
515 unset_notify_relay(struct ifcvf_internal *internal)
516 {
517         void *status;
518
519         if (internal->tid) {
520                 pthread_cancel(internal->tid);
521                 pthread_join(internal->tid, &status);
522         }
523         internal->tid = 0;
524
525         if (internal->epfd >= 0)
526                 close(internal->epfd);
527         internal->epfd = -1;
528
529         return 0;
530 }
531
532 static int
533 update_datapath(struct ifcvf_internal *internal)
534 {
535         int ret;
536
537         rte_spinlock_lock(&internal->lock);
538
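        /* Start the datapath only when both 'started' (device probed) and
         * 'dev_attached' (vhost device configured) are set; tear it down as
         * soon as either flag is cleared.
         */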
539         if (!rte_atomic32_read(&internal->running) &&
540             (rte_atomic32_read(&internal->started) &&
541              rte_atomic32_read(&internal->dev_attached))) {
542                 ret = ifcvf_dma_map(internal, true);
543                 if (ret)
544                         goto err;
545
546                 ret = vdpa_enable_vfio_intr(internal, false);
547                 if (ret)
548                         goto err;
549
550                 ret = vdpa_ifcvf_start(internal);
551                 if (ret)
552                         goto err;
553
554                 ret = setup_notify_relay(internal);
555                 if (ret)
556                         goto err;
557
558                 rte_atomic32_set(&internal->running, 1);
559         } else if (rte_atomic32_read(&internal->running) &&
560                    (!rte_atomic32_read(&internal->started) ||
561                     !rte_atomic32_read(&internal->dev_attached))) {
562                 ret = unset_notify_relay(internal);
563                 if (ret)
564                         goto err;
565
566                 vdpa_ifcvf_stop(internal);
567
568                 ret = vdpa_disable_vfio_intr(internal);
569                 if (ret)
570                         goto err;
571
572                 ret = ifcvf_dma_map(internal, false);
573                 if (ret)
574                         goto err;
575
576                 rte_atomic32_set(&internal->running, 0);
577         }
578
579         rte_spinlock_unlock(&internal->lock);
580         return 0;
581 err:
582         rte_spinlock_unlock(&internal->lock);
583         return ret;
584 }
585
586 static int
587 m_ifcvf_start(struct ifcvf_internal *internal)
588 {
589         struct ifcvf_hw *hw = &internal->hw;
590         uint32_t i, nr_vring;
591         int vid, ret;
592         struct rte_vhost_vring vq;
593         void *vring_buf;
594         uint64_t m_vring_iova = IFCVF_MEDIATED_VRING;
595         uint64_t size;
596         uint64_t gpa;
597
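        /* Software live-migration fallback: allocate a shadow used ring per
         * queue and DMA-map it at the IFCVF_MEDIATED_VRING IOVA; RX queues
         * (even index) use this mediated ring so the relay thread can copy
         * used entries back to the guest ring, while TX queues keep direct I/O.
         */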
598         memset(&vq, 0, sizeof(vq));
599         vid = internal->vid;
600         nr_vring = rte_vhost_get_vring_num(vid);
601         rte_vhost_get_negotiated_features(vid, &hw->req_features);
602
603         for (i = 0; i < nr_vring; i++) {
604                 rte_vhost_get_vhost_vring(vid, i, &vq);
605
606                 size = RTE_ALIGN_CEIL(vring_size(vq.size, rte_mem_page_size()),
607                                 rte_mem_page_size());
608                 vring_buf = rte_zmalloc("ifcvf", size, rte_mem_page_size());
609                 vring_init(&internal->m_vring[i], vq.size, vring_buf,
610                                 rte_mem_page_size());
611
612                 ret = rte_vfio_container_dma_map(internal->vfio_container_fd,
613                         (uint64_t)(uintptr_t)vring_buf, m_vring_iova, size);
614                 if (ret < 0) {
615                         DRV_LOG(ERR, "mediated vring DMA map failed.");
616                         goto error;
617                 }
618
619                 gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
620                 if (gpa == 0) {
621                         DRV_LOG(ERR, "Failed to get GPA for descriptor ring.");
622                         return -1;
623                 }
624                 hw->vring[i].desc = gpa;
625
626                 gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
627                 if (gpa == 0) {
628                         DRV_LOG(ERR, "Failed to get GPA for available ring.");
629                         return -1;
630                 }
631                 hw->vring[i].avail = gpa;
632
633                 /* Direct I/O for Tx queue, relay for Rx queue */
634                 if (i & 1) {
635                         gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
636                         if (gpa == 0) {
637                                 DRV_LOG(ERR, "Failed to get GPA for used ring.");
638                                 return -1;
639                         }
640                         hw->vring[i].used = gpa;
641                 } else {
642                         hw->vring[i].used = m_vring_iova +
643                                 (char *)internal->m_vring[i].used -
644                                 (char *)internal->m_vring[i].desc;
645                 }
646
647                 hw->vring[i].size = vq.size;
648
649                 rte_vhost_get_vring_base(vid, i,
650                                 &internal->m_vring[i].avail->idx,
651                                 &internal->m_vring[i].used->idx);
652
653                 rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx,
654                                 &hw->vring[i].last_used_idx);
655
656                 m_vring_iova += size;
657         }
658         hw->nr_vring = nr_vring;
659
660         return ifcvf_start_hw(&internal->hw);
661
662 error:
663         for (i = 0; i < nr_vring; i++)
664                 if (internal->m_vring[i].desc)
665                         rte_free(internal->m_vring[i].desc);
666
667         return -1;
668 }
669
670 static int
671 m_ifcvf_stop(struct ifcvf_internal *internal)
672 {
673         int vid;
674         uint32_t i;
675         struct rte_vhost_vring vq;
676         struct ifcvf_hw *hw = &internal->hw;
677         uint64_t m_vring_iova = IFCVF_MEDIATED_VRING;
678         uint64_t size, len;
679
680         vid = internal->vid;
681         ifcvf_stop_hw(hw);
682
683         for (i = 0; i < hw->nr_vring; i++) {
684                 /* synchronize remaining new used entries if any */
685                 if ((i & 1) == 0)
686                         update_used_ring(internal, i);
687
688                 rte_vhost_get_vhost_vring(vid, i, &vq);
689                 len = IFCVF_USED_RING_LEN(vq.size);
690                 rte_vhost_log_used_vring(vid, i, 0, len);
691
692                 size = RTE_ALIGN_CEIL(vring_size(vq.size, rte_mem_page_size()),
693                                 rte_mem_page_size());
694                 rte_vfio_container_dma_unmap(internal->vfio_container_fd,
695                         (uint64_t)(uintptr_t)internal->m_vring[i].desc,
696                         m_vring_iova, size);
697
698                 rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
699                                 hw->vring[i].last_used_idx);
700                 rte_free(internal->m_vring[i].desc);
701                 m_vring_iova += size;
702         }
703
704         return 0;
705 }
706
707 static void
708 update_used_ring(struct ifcvf_internal *internal, uint16_t qid)
709 {
710         rte_vdpa_relay_vring_used(internal->vid, qid, &internal->m_vring[qid]);
711         rte_vhost_vring_call(internal->vid, qid);
712 }
713
714 static void *
715 vring_relay(void *arg)
716 {
717         int i, vid, epfd, fd, nfds;
718         struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
719         struct rte_vhost_vring vring;
720         uint16_t qid, q_num;
721         struct epoll_event events[IFCVF_MAX_QUEUES * 4];
722         struct epoll_event ev;
723         int nbytes;
724         uint64_t buf;
725
726         vid = internal->vid;
727         q_num = rte_vhost_get_vring_num(vid);
728
729         /* add notify fd and interrupt fd to epoll */
730         epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
731         if (epfd < 0) {
732                 DRV_LOG(ERR, "failed to create epoll instance.");
733                 return NULL;
734         }
735         internal->epfd = epfd;
736
737         vring.kickfd = -1;
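        /* Epoll data layout: bit 0 flags the fd type (0 = guest kick fd,
         * 1 = device interrupt eventfd), bits 1..31 hold the queue id and
         * the upper 32 bits hold the fd itself.
         */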
738         for (qid = 0; qid < q_num; qid++) {
739                 ev.events = EPOLLIN | EPOLLPRI;
740                 rte_vhost_get_vhost_vring(vid, qid, &vring);
741                 ev.data.u64 = qid << 1 | (uint64_t)vring.kickfd << 32;
742                 if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
743                         DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
744                         return NULL;
745                 }
746         }
747
748         for (qid = 0; qid < q_num; qid += 2) {
749                 ev.events = EPOLLIN | EPOLLPRI;
750                 /* leave a flag to mark it's for interrupt */
751                 ev.data.u64 = 1 | qid << 1 |
752                         (uint64_t)internal->intr_fd[qid] << 32;
753                 if (epoll_ctl(epfd, EPOLL_CTL_ADD, internal->intr_fd[qid], &ev)
754                                 < 0) {
755                         DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
756                         return NULL;
757                 }
758                 update_used_ring(internal, qid);
759         }
760
761         /* start relay with a first kick */
762         for (qid = 0; qid < q_num; qid++)
763                 ifcvf_notify_queue(&internal->hw, qid);
764
765         /* listen to the events and react accordingly */
766         for (;;) {
767                 nfds = epoll_wait(epfd, events, q_num * 2, -1);
768                 if (nfds < 0) {
769                         if (errno == EINTR)
770                                 continue;
771                         DRV_LOG(ERR, "epoll_wait returned error");
772                         return NULL;
773                 }
774
775                 for (i = 0; i < nfds; i++) {
776                         fd = (uint32_t)(events[i].data.u64 >> 32);
777                         do {
778                                 nbytes = read(fd, &buf, 8);
779                                 if (nbytes < 0) {
780                                         if (errno == EINTR ||
781                                             errno == EWOULDBLOCK ||
782                                             errno == EAGAIN)
783                                                 continue;
784                                         DRV_LOG(INFO, "Error reading "
785                                                 "kickfd: %s",
786                                                 strerror(errno));
787                                 }
788                                 break;
789                         } while (1);
790
791                         qid = events[i].data.u32 >> 1;
792
793                         if (events[i].data.u32 & 1)
794                                 update_used_ring(internal, qid);
795                         else
796                                 ifcvf_notify_queue(&internal->hw, qid);
797                 }
798         }
799
800         return NULL;
801 }
802
803 static int
804 setup_vring_relay(struct ifcvf_internal *internal)
805 {
806         char name[THREAD_NAME_LEN];
807         int ret;
808
809         snprintf(name, sizeof(name), "ifc-vring-%d", internal->vid);
810         ret = rte_ctrl_thread_create(&internal->tid, name, NULL, vring_relay,
811                                      (void *)internal);
812         if (ret != 0) {
813                 DRV_LOG(ERR, "failed to create ring relay pthread.");
814                 return -1;
815         }
816
817         return 0;
818 }
819
820 static int
821 unset_vring_relay(struct ifcvf_internal *internal)
822 {
823         void *status;
824
825         if (internal->tid) {
826                 pthread_cancel(internal->tid);
827                 pthread_join(internal->tid, &status);
828         }
829         internal->tid = 0;
830
831         if (internal->epfd >= 0)
832                 close(internal->epfd);
833         internal->epfd = -1;
834
835         return 0;
836 }
837
838 static int
839 ifcvf_sw_fallback_switchover(struct ifcvf_internal *internal)
840 {
841         int ret;
842         int vid = internal->vid;
843
844         /* stop the direct IO data path */
845         unset_notify_relay(internal);
846         vdpa_ifcvf_stop(internal);
847         vdpa_disable_vfio_intr(internal);
848
849         ret = rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, false);
850         if (ret && ret != -ENOTSUP)
851                 goto error;
852
853         /* set up interrupt for interrupt relay */
854         ret = vdpa_enable_vfio_intr(internal, true);
855         if (ret)
856                 goto unmap;
857
858         /* config the VF */
859         ret = m_ifcvf_start(internal);
860         if (ret)
861                 goto unset_intr;
862
863         /* set up vring relay thread */
864         ret = setup_vring_relay(internal);
865         if (ret)
866                 goto stop_vf;
867
868         rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true);
869
870         internal->sw_fallback_running = true;
871
872         return 0;
873
874 stop_vf:
875         m_ifcvf_stop(internal);
876 unset_intr:
877         vdpa_disable_vfio_intr(internal);
878 unmap:
879         ifcvf_dma_map(internal, false);
880 error:
881         return -1;
882 }
883
884 static int
885 ifcvf_dev_config(int vid)
886 {
887         struct rte_vdpa_device *vdev;
888         struct internal_list *list;
889         struct ifcvf_internal *internal;
890
891         vdev = rte_vhost_get_vdpa_device(vid);
892         list = find_internal_resource_by_vdev(vdev);
893         if (list == NULL) {
894                 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
895                 return -1;
896         }
897
898         internal = list->internal;
899         internal->vid = vid;
900         rte_atomic32_set(&internal->dev_attached, 1);
901         update_datapath(internal);
902
903         if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
904                 DRV_LOG(NOTICE, "vDPA (%s): software relay is used.",
905                                 vdev->device->name);
906
907         internal->configured = 1;
908         return 0;
909 }
910
911 static int
912 ifcvf_dev_close(int vid)
913 {
914         struct rte_vdpa_device *vdev;
915         struct internal_list *list;
916         struct ifcvf_internal *internal;
917
918         vdev = rte_vhost_get_vdpa_device(vid);
919         list = find_internal_resource_by_vdev(vdev);
920         if (list == NULL) {
921                 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
922                 return -1;
923         }
924
925         internal = list->internal;
926
927         if (internal->sw_fallback_running) {
928                 /* unset ring relay */
929                 unset_vring_relay(internal);
930
931                 /* reset VF */
932                 m_ifcvf_stop(internal);
933
934                 /* remove interrupt setting */
935                 vdpa_disable_vfio_intr(internal);
936
937                 /* unset DMA map for guest memory */
938                 ifcvf_dma_map(internal, false);
939
940                 internal->sw_fallback_running = false;
941         } else {
942                 rte_atomic32_set(&internal->dev_attached, 0);
943                 update_datapath(internal);
944         }
945
946         internal->configured = 0;
947         return 0;
948 }
949
950 static int
951 ifcvf_set_features(int vid)
952 {
953         uint64_t features = 0;
954         struct rte_vdpa_device *vdev;
955         struct internal_list *list;
956         struct ifcvf_internal *internal;
957         uint64_t log_base = 0, log_size = 0;
958
959         vdev = rte_vhost_get_vdpa_device(vid);
960         list = find_internal_resource_by_vdev(vdev);
961         if (list == NULL) {
962                 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
963                 return -1;
964         }
965
966         internal = list->internal;
967         rte_vhost_get_negotiated_features(vid, &features);
968
969         if (!RTE_VHOST_NEED_LOG(features))
970                 return 0;
971
972         if (internal->sw_lm) {
973                 ifcvf_sw_fallback_switchover(internal);
974         } else {
975                 rte_vhost_get_log_base(vid, &log_base, &log_size);
976                 rte_vfio_container_dma_map(internal->vfio_container_fd,
977                                 log_base, IFCVF_LOG_BASE, log_size);
978                 ifcvf_enable_logging(&internal->hw, IFCVF_LOG_BASE, log_size);
979         }
980
981         return 0;
982 }
983
984 static int
985 ifcvf_get_vfio_group_fd(int vid)
986 {
987         struct rte_vdpa_device *vdev;
988         struct internal_list *list;
989
990         vdev = rte_vhost_get_vdpa_device(vid);
991         list = find_internal_resource_by_vdev(vdev);
992         if (list == NULL) {
993                 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
994                 return -1;
995         }
996
997         return list->internal->vfio_group_fd;
998 }
999
1000 static int
1001 ifcvf_get_vfio_device_fd(int vid)
1002 {
1003         struct rte_vdpa_device *vdev;
1004         struct internal_list *list;
1005
1006         vdev = rte_vhost_get_vdpa_device(vid);
1007         list = find_internal_resource_by_vdev(vdev);
1008         if (list == NULL) {
1009                 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
1010                 return -1;
1011         }
1012
1013         return list->internal->vfio_dev_fd;
1014 }
1015
1016 static int
1017 ifcvf_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
1018 {
1019         struct rte_vdpa_device *vdev;
1020         struct internal_list *list;
1021         struct ifcvf_internal *internal;
1022         struct vfio_region_info reg = { .argsz = sizeof(reg) };
1023         int ret;
1024
1025         vdev = rte_vhost_get_vdpa_device(vid);
1026         list = find_internal_resource_by_vdev(vdev);
1027         if (list == NULL) {
1028                 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
1029                 return -1;
1030         }
1031
1032         internal = list->internal;
1033
1034         reg.index = ifcvf_get_notify_region(&internal->hw);
1035         ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
1036         if (ret) {
1037                 DRV_LOG(ERR, "Cannot get device region info: %s",
1038                                 strerror(errno));
1039                 return -1;
1040         }
1041
1042         *offset = ifcvf_get_queue_notify_off(&internal->hw, qid) + reg.offset;
1043         *size = 0x1000;
1044
1045         return 0;
1046 }
1047
1048 static int
1049 ifcvf_get_queue_num(struct rte_vdpa_device *vdev, uint32_t *queue_num)
1050 {
1051         struct internal_list *list;
1052
1053         list = find_internal_resource_by_vdev(vdev);
1054         if (list == NULL) {
1055                 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
1056                 return -1;
1057         }
1058
1059         *queue_num = list->internal->max_queues;
1060
1061         return 0;
1062 }
1063
1064 static int
1065 ifcvf_get_vdpa_features(struct rte_vdpa_device *vdev, uint64_t *features)
1066 {
1067         struct internal_list *list;
1068
1069         list = find_internal_resource_by_vdev(vdev);
1070         if (list == NULL) {
1071                 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
1072                 return -1;
1073         }
1074
1075         *features = list->internal->features;
1076
1077         return 0;
1078 }
1079
1080 #define VDPA_SUPPORTED_PROTOCOL_FEATURES \
1081                 (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \
1082                  1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ | \
1083                  1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD | \
1084                  1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | \
1085                  1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD | \
1086                  1ULL << VHOST_USER_PROTOCOL_F_STATUS)
1087 static int
1088 ifcvf_get_protocol_features(struct rte_vdpa_device *vdev, uint64_t *features)
1089 {
1090         RTE_SET_USED(vdev);
1091
1092         *features = VDPA_SUPPORTED_PROTOCOL_FEATURES;
1093         return 0;
1094 }
1095
1096 static int
1097 ifcvf_set_vring_state(int vid, int vring, int state)
1098 {
1099         struct rte_vdpa_device *vdev;
1100         struct internal_list *list;
1101         struct ifcvf_internal *internal;
1102         struct ifcvf_hw *hw;
1103         struct ifcvf_pci_common_cfg *cfg;
1104         int ret = 0;
1105
1106         vdev = rte_vhost_get_vdpa_device(vid);
1107         list = find_internal_resource_by_vdev(vdev);
1108         if (list == NULL) {
1109                 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
1110                 return -1;
1111         }
1112
1113         internal = list->internal;
1114         if (vring < 0 || vring >= internal->max_queues * 2) {
1115                 DRV_LOG(ERR, "Vring index %d not correct", vring);
1116                 return -1;
1117         }
1118
1119         hw = &internal->hw;
1120         if (!internal->configured)
1121                 goto exit;
1122
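        /* Select the queue in the virtio common config space and toggle its
         * enable bit, then re-arm or release the VFIO MSI-X interrupts to
         * follow the new state.
         */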
1123         cfg = hw->common_cfg;
1124         IFCVF_WRITE_REG16(vring, &cfg->queue_select);
1125         IFCVF_WRITE_REG16(!!state, &cfg->queue_enable);
1126
1127         if (!state && hw->vring[vring].enable) {
1128                 ret = vdpa_disable_vfio_intr(internal);
1129                 if (ret)
1130                         return ret;
1131         }
1132
1133         if (state && !hw->vring[vring].enable) {
1134                 ret = vdpa_enable_vfio_intr(internal, false);
1135                 if (ret)
1136                         return ret;
1137         }
1138
1139 exit:
1140         hw->vring[vring].enable = !!state;
1141         return 0;
1142 }
1143
1144 static struct rte_vdpa_dev_ops ifcvf_ops = {
1145         .get_queue_num = ifcvf_get_queue_num,
1146         .get_features = ifcvf_get_vdpa_features,
1147         .get_protocol_features = ifcvf_get_protocol_features,
1148         .dev_conf = ifcvf_dev_config,
1149         .dev_close = ifcvf_dev_close,
1150         .set_vring_state = ifcvf_set_vring_state,
1151         .set_features = ifcvf_set_features,
1152         .migration_done = NULL,
1153         .get_vfio_group_fd = ifcvf_get_vfio_group_fd,
1154         .get_vfio_device_fd = ifcvf_get_vfio_device_fd,
1155         .get_notify_area = ifcvf_get_notify_area,
1156 };
1157
1158 static inline int
1159 open_int(const char *key __rte_unused, const char *value, void *extra_args)
1160 {
1161         uint16_t *n = extra_args;
1162
1163         if (value == NULL || extra_args == NULL)
1164                 return -EINVAL;
1165
1166         *n = (uint16_t)strtoul(value, NULL, 0);
1167         if (*n == USHRT_MAX && errno == ERANGE)
1168                 return -1;
1169
1170         return 0;
1171 }
1172
1173 static int
1174 ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1175                 struct rte_pci_device *pci_dev)
1176 {
1177         uint64_t features;
1178         struct ifcvf_internal *internal = NULL;
1179         struct internal_list *list = NULL;
1180         int vdpa_mode = 0;
1181         int sw_fallback_lm = 0;
1182         struct rte_kvargs *kvlist = NULL;
1183         int ret = 0;
1184
1185         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1186                 return 0;
1187
1188         if (!pci_dev->device.devargs)
1189                 return 1;
1190
1191         kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
1192                         ifcvf_valid_arguments);
1193         if (kvlist == NULL)
1194                 return 1;
1195
1196         /* probe only when vdpa mode is specified */
1197         if (rte_kvargs_count(kvlist, IFCVF_VDPA_MODE) == 0) {
1198                 rte_kvargs_free(kvlist);
1199                 return 1;
1200         }
1201
1202         ret = rte_kvargs_process(kvlist, IFCVF_VDPA_MODE, &open_int,
1203                         &vdpa_mode);
1204         if (ret < 0 || vdpa_mode == 0) {
1205                 rte_kvargs_free(kvlist);
1206                 return 1;
1207         }
1208
1209         list = rte_zmalloc("ifcvf", sizeof(*list), 0);
1210         if (list == NULL)
1211                 goto error;
1212
1213         internal = rte_zmalloc("ifcvf", sizeof(*internal), 0);
1214         if (internal == NULL)
1215                 goto error;
1216
1217         internal->pdev = pci_dev;
1218         rte_spinlock_init(&internal->lock);
1219
1220         if (ifcvf_vfio_setup(internal) < 0) {
1221                 DRV_LOG(ERR, "failed to setup device %s", pci_dev->name);
1222                 goto error;
1223         }
1224
1225         if (ifcvf_init_hw(&internal->hw, internal->pdev) < 0) {
1226                 DRV_LOG(ERR, "failed to init device %s", pci_dev->name);
1227                 goto error;
1228         }
1229
1230         internal->configured = 0;
1231         internal->max_queues = IFCVF_MAX_QUEUES;
1232         features = ifcvf_get_features(&internal->hw);
1233         internal->features = (features &
1234                 ~(1ULL << VIRTIO_F_IOMMU_PLATFORM)) |
1235                 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |
1236                 (1ULL << VIRTIO_NET_F_CTRL_VQ) |
1237                 (1ULL << VIRTIO_NET_F_STATUS) |
1238                 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) |
1239                 (1ULL << VHOST_F_LOG_ALL);
1240
1241         list->internal = internal;
1242
1243         if (rte_kvargs_count(kvlist, IFCVF_SW_FALLBACK_LM)) {
1244                 ret = rte_kvargs_process(kvlist, IFCVF_SW_FALLBACK_LM,
1245                                 &open_int, &sw_fallback_lm);
1246                 if (ret < 0)
1247                         goto error;
1248         }
1249         internal->sw_lm = sw_fallback_lm;
1250
1251         internal->vdev = rte_vdpa_register_device(&pci_dev->device, &ifcvf_ops);
1252         if (internal->vdev == NULL) {
1253                 DRV_LOG(ERR, "failed to register device %s", pci_dev->name);
1254                 goto error;
1255         }
1256
1257         pthread_mutex_lock(&internal_list_lock);
1258         TAILQ_INSERT_TAIL(&internal_list, list, next);
1259         pthread_mutex_unlock(&internal_list_lock);
1260
1261         rte_atomic32_set(&internal->started, 1);
1262         update_datapath(internal);
1263
1264         rte_kvargs_free(kvlist);
1265         return 0;
1266
1267 error:
1268         rte_kvargs_free(kvlist);
1269         rte_free(list);
1270         rte_free(internal);
1271         return -1;
1272 }
1273
1274 static int
1275 ifcvf_pci_remove(struct rte_pci_device *pci_dev)
1276 {
1277         struct ifcvf_internal *internal;
1278         struct internal_list *list;
1279
1280         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1281                 return 0;
1282
1283         list = find_internal_resource_by_dev(pci_dev);
1284         if (list == NULL) {
1285                 DRV_LOG(ERR, "Invalid device: %s", pci_dev->name);
1286                 return -1;
1287         }
1288
1289         internal = list->internal;
1290         rte_atomic32_set(&internal->started, 0);
1291         update_datapath(internal);
1292
1293         rte_pci_unmap_device(internal->pdev);
1294         rte_vfio_container_destroy(internal->vfio_container_fd);
1295         rte_vdpa_unregister_device(internal->vdev);
1296
1297         pthread_mutex_lock(&internal_list_lock);
1298         TAILQ_REMOVE(&internal_list, list, next);
1299         pthread_mutex_unlock(&internal_list_lock);
1300
1301         rte_free(list);
1302         rte_free(internal);
1303
1304         return 0;
1305 }
1306
1307 /*
1308  * IFCVF has the same vendor ID and device ID as virtio net PCI
1309  * device, with its specific subsystem vendor ID and device ID.
1310  */
1311 static const struct rte_pci_id pci_id_ifcvf_map[] = {
1312         { .class_id = RTE_CLASS_ANY_ID,
1313           .vendor_id = IFCVF_VENDOR_ID,
1314           .device_id = IFCVF_DEVICE_ID,
1315           .subsystem_vendor_id = IFCVF_SUBSYS_VENDOR_ID,
1316           .subsystem_device_id = IFCVF_SUBSYS_DEVICE_ID,
1317         },
1318
1319         { .vendor_id = 0, /* sentinel */
1320         },
1321 };
1322
1323 static struct rte_pci_driver rte_ifcvf_vdpa = {
1324         .id_table = pci_id_ifcvf_map,
1325         .drv_flags = 0,
1326         .probe = ifcvf_pci_probe,
1327         .remove = ifcvf_pci_remove,
1328 };
1329
1330 RTE_PMD_REGISTER_PCI(net_ifcvf, rte_ifcvf_vdpa);
1331 RTE_PMD_REGISTER_PCI_TABLE(net_ifcvf, pci_id_ifcvf_map);
1332 RTE_PMD_REGISTER_KMOD_DEP(net_ifcvf, "* vfio-pci");