/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#ifndef _VHOST_NET_CDEV_H_
#define _VHOST_NET_CDEV_H_
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <unistd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <sys/socket.h>
#include <linux/if.h>

#include <rte_log.h>
#include <rte_ether.h>
#include <rte_rwlock.h>
#include <rte_malloc.h>

#include "rte_vhost.h"
#include "rte_vdpa.h"
#include "rte_vdpa_dev.h"

#include "rte_vhost_async.h"

/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING ((uint32_t)1 << 0)
/* Used to indicate that the device is ready to operate */
#define VIRTIO_DEV_READY ((uint32_t)1 << 1)
/* Used to indicate that the built-in vhost net device backend is enabled */
#define VIRTIO_DEV_BUILTIN_VIRTIO_NET ((uint32_t)1 << 2)
/* Used to indicate that the device has its own data path and is configured */
#define VIRTIO_DEV_VDPA_CONFIGURED ((uint32_t)1 << 3)
/* Used to indicate that the feature negotiation failed */
#define VIRTIO_DEV_FEATURES_FAILED ((uint32_t)1 << 4)
/* Used to indicate that the virtio_net tx code should fill TX ol_flags */
#define VIRTIO_DEV_LEGACY_OL_FLAGS ((uint32_t)1 << 5)

/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1

#define BUF_VECTOR_MAX 256

#define VHOST_LOG_CACHE_NR 32

#define MAX_PKT_BURST 32

#define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST)
#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2)

#define PACKED_DESC_ENQUEUE_USED_FLAG(w)	\
	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
		VRING_DESC_F_WRITE)
#define PACKED_DESC_DEQUEUE_USED_FLAG(w)	\
	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED) : 0x0)
#define PACKED_DESC_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | \
					 VRING_DESC_F_INDIRECT)

#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \
			    sizeof(struct vring_packed_desc))
#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)
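/*
 * Note: PACKED_BATCH_SIZE is the number of packed descriptors that fit in one
 * cache line; with the common 64-byte cache line and the 16-byte
 * struct vring_packed_desc this evaluates to 4, and PACKED_BATCH_MASK (3) is
 * then used for the alignment checks in the batched ring-processing paths.
 */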

#ifdef VHOST_GCC_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("GCC unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifdef VHOST_CLANG_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifdef VHOST_ICC_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll (4)") \
	for (iter = val; iter < size; iter++)
#endif

#ifndef vhost_for_each_try_unroll
#define vhost_for_each_try_unroll(iter, val, num) \
	for (iter = val; iter < num; iter++)
#endif
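/*
 * The VHOST_*_UNROLL_PRAGMA macros are expected to be supplied by the build
 * configuration depending on the detected compiler; when none is defined,
 * vhost_for_each_try_unroll() falls back to a plain, not explicitly unrolled,
 * for loop.
 */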

/**
 * Structure containing the buffer address, length and descriptor index
 * from the vring, used to do scatter RX.
 */
struct buf_vector {
	uint64_t buf_iova;
	uint64_t buf_addr;
	uint32_t buf_len;
	uint32_t desc_idx;
};

/*
 * Structure containing the info for each batched memory copy.
 */
struct batch_copy_elem {
	void *dst;
	void *src;
	uint32_t len;
	uint64_t log_addr;
};

/*
 * Structure that contains the info for batched dirty logging.
 */
struct log_cache_entry {
	uint32_t offset;
	unsigned long val;
};
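/*
 * Note: each cache entry effectively covers one word of the dirty-page log
 * bitmap: "offset" identifies the word within the log and "val" accumulates
 * the bits to be OR'ed into it when the cache is synced to the guest log.
 */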

struct vring_used_elem_packed {
	uint16_t id;
	uint16_t flags;
	uint32_t len;
	uint32_t count;
};

/**
 * inflight async packet information
 */
struct async_inflight_info {
	struct rte_mbuf *mbuf;
	uint16_t descs; /* num of descs inflight */
	uint16_t nr_buffers; /* num of buffers inflight for packed ring */
};

struct vhost_async {
	/* operation callbacks for DMA */
	struct rte_vhost_async_channel_ops ops;

	struct rte_vhost_iov_iter src_iov_iter[VHOST_MAX_ASYNC_IT];
	struct rte_vhost_iov_iter dst_iov_iter[VHOST_MAX_ASYNC_IT];
	struct iovec src_iovec[VHOST_MAX_ASYNC_VEC];
	struct iovec dst_iovec[VHOST_MAX_ASYNC_VEC];

	/* data transfer status */
	struct async_inflight_info *pkts_info;
	uint16_t pkts_idx;
	uint16_t pkts_inflight_n;
	uint16_t last_pkts_n;
	union {
		struct vring_used_elem  *descs_split;
		struct vring_used_elem_packed *buffers_packed;
	};
	union {
		uint16_t desc_idx_split;
		uint16_t buffer_idx_packed;
	};
	union {
		uint16_t last_desc_idx_split;
		uint16_t last_buffer_idx_packed;
	};
};

/**
 * Structure containing variables relevant to RX/TX virtqueues.
 */
struct vhost_virtqueue {
	union {
		struct vring_desc	*desc;
		struct vring_packed_desc   *desc_packed;
	};
	union {
		struct vring_avail	*avail;
		struct vring_packed_desc_event *driver_event;
	};
	union {
		struct vring_used	*used;
		struct vring_packed_desc_event *device_event;
	};
	uint16_t		size;

	uint16_t		last_avail_idx;
	uint16_t		last_used_idx;
	/* Last used index we notify to the front end. */
	uint16_t		signalled_used;
	bool			signalled_used_valid;
#define VIRTIO_INVALID_EVENTFD		(-1)
#define VIRTIO_UNINITIALIZED_EVENTFD	(-2)

	bool			enabled;
	bool			access_ok;
	bool			ready;

	rte_spinlock_t		access_lock;

	union {
		struct vring_used_elem  *shadow_used_split;
		struct vring_used_elem_packed *shadow_used_packed;
	};
	uint16_t		shadow_used_idx;
	/* Latest cache-aligned descriptor index for packed ring enqueue */
	uint16_t		shadow_aligned_idx;
	/* First dequeued descriptor index recorded for the packed ring */
	uint16_t		shadow_last_used_idx;

	uint16_t		batch_copy_nb_elems;
	struct batch_copy_elem	*batch_copy_elems;
	int			numa_node;
	bool			used_wrap_counter;
	bool			avail_wrap_counter;

	uint16_t		log_cache_nb_elem;
	/* Physical address of the used ring, for dirty page logging */
	uint64_t		log_guest_addr;
	struct log_cache_entry	*log_cache;

	rte_rwlock_t	iotlb_lock;
	rte_rwlock_t	iotlb_pending_lock;
	struct rte_mempool *iotlb_pool;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
	int				iotlb_cache_nr;

	/* Used to notify the guest (trigger interrupt) */
	int			callfd;
	/* Currently unused as polling mode is enabled */
	int			kickfd;

	/* inflight shared memory info */
	union {
		struct rte_vhost_inflight_info_split *inflight_split;
		struct rte_vhost_inflight_info_packed *inflight_packed;
	};
	struct rte_vhost_resubmit_info *resubmit_inflight;
	uint64_t		global_counter;

	struct vhost_async	*async;

	int			notif_enable;
#define VIRTIO_UNINITIALIZED_NOTIF	(-1)

	struct vhost_vring_addr ring_addrs;
} __rte_cache_aligned;

/* Virtio device status as per Virtio specification */
#define VIRTIO_DEVICE_STATUS_RESET		0x00
#define VIRTIO_DEVICE_STATUS_ACK		0x01
#define VIRTIO_DEVICE_STATUS_DRIVER		0x02
#define VIRTIO_DEVICE_STATUS_DRIVER_OK		0x04
#define VIRTIO_DEVICE_STATUS_FEATURES_OK	0x08
#define VIRTIO_DEVICE_STATUS_DEV_NEED_RESET	0x40
#define VIRTIO_DEVICE_STATUS_FAILED		0x80

#define VHOST_MAX_VRING			0x100
#define VHOST_MAX_QUEUE_PAIRS		0x80

/* Declare IOMMU related bits for older kernels */
#ifndef VIRTIO_F_IOMMU_PLATFORM

#define VIRTIO_F_IOMMU_PLATFORM 33

struct vhost_iotlb_msg {
	__u64 iova;
	__u64 size;
	__u64 uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
	__u8 perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
	__u8 type;
};

#define VHOST_IOTLB_MSG 0x1

struct vhost_msg {
	int type;
	union {
		struct vhost_iotlb_msg iotlb;
		__u8 padding[64];
	};
};
#endif

/*
 * Define virtio 1.0 for older kernels
 */
#ifndef VIRTIO_F_VERSION_1
 #define VIRTIO_F_VERSION_1 32
#endif

/* Declare packed ring related bits for older kernels */
#ifndef VIRTIO_F_RING_PACKED

#define VIRTIO_F_RING_PACKED 34

struct vring_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

struct vring_packed_desc_event {
	uint16_t off_wrap;
	uint16_t flags;
};
#endif

/*
 * Declare the packed ring defines below unconditionally,
 * as the kernel header might use different names.
 */
#define VRING_DESC_F_AVAIL	(1ULL << 7)
#define VRING_DESC_F_USED	(1ULL << 15)

#define VRING_EVENT_F_ENABLE 0x0
#define VRING_EVENT_F_DISABLE 0x1
#define VRING_EVENT_F_DESC 0x2

/*
 * Available and used descs are in the same order
 */
#ifndef VIRTIO_F_IN_ORDER
#define VIRTIO_F_IN_ORDER      35
#endif

/* Features supported by this builtin vhost-user net driver. */
#define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
				(1ULL << VIRTIO_F_ANY_LAYOUT) | \
				(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
				(1ULL << VIRTIO_NET_F_CTRL_RX) | \
				(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
				(1ULL << VIRTIO_NET_F_MQ)      | \
				(1ULL << VIRTIO_F_VERSION_1)   | \
				(1ULL << VHOST_F_LOG_ALL)      | \
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
				(1ULL << VIRTIO_NET_F_GSO) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO4) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO6) | \
				(1ULL << VIRTIO_NET_F_HOST_UFO) | \
				(1ULL << VIRTIO_NET_F_HOST_ECN) | \
				(1ULL << VIRTIO_NET_F_CSUM)    | \
				(1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_UFO) | \
				(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
				(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
				(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
				(1ULL << VIRTIO_NET_F_MTU)  | \
				(1ULL << VIRTIO_F_IN_ORDER) | \
				(1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
				(1ULL << VIRTIO_F_RING_PACKED))


struct guest_page {
	uint64_t guest_phys_addr;
	uint64_t host_phys_addr;
	uint64_t size;
};

struct inflight_mem_info {
	int		fd;
	void		*addr;
	uint64_t	size;
};

/**
 * Device structure containing all configuration information
 * relating to the device.
 */
struct virtio_net {
	/* Frontend (QEMU) memory and memory region information */
	struct rte_vhost_memory *mem;
	uint64_t		features;
	uint64_t		protocol_features;
	int			vid;
	uint32_t		flags;
	uint16_t		vhost_hlen;
	/* to tell if we need to broadcast a RARP packet */
	int16_t			broadcast_rarp;
	uint32_t		nr_vring;
	int			async_copy;

	int			extbuf;
	int			linearbuf;
	struct vhost_virtqueue	*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
	struct inflight_mem_info *inflight_info;
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
	char			ifname[IF_NAME_SZ];
	uint64_t		log_size;
	uint64_t		log_base;
	uint64_t		log_addr;
	struct rte_ether_addr	mac;
	uint16_t		mtu;
	uint8_t			status;

	struct vhost_device_ops const *notify_ops;

	uint32_t		nr_guest_pages;
	uint32_t		max_guest_pages;
	struct guest_page	*guest_pages;

	int			slave_req_fd;
	rte_spinlock_t		slave_req_lock;

	int			postcopy_ufd;
	int			postcopy_listening;

	struct rte_vdpa_device *vdpa_dev;

	/* context data for the external message handlers */
	void			*extern_data;
	/* pre and post vhost user message handlers for the device */
	struct rte_vhost_user_extern_ops extern_ops;
} __rte_cache_aligned;

static __rte_always_inline bool
vq_is_packed(struct virtio_net *dev)
{
	return dev->features & (1ull << VIRTIO_F_RING_PACKED);
}

static inline bool
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_DESC_F_USED);
}
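/*
 * For packed rings, a descriptor is available to the device when its AVAIL
 * flag matches the driver's wrap counter and its USED flag does not; once the
 * device writes it back, both flags match the device's wrap counter, so the
 * check above fails until the ring wraps again.
 */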

static inline void
vq_inc_last_used_packed(struct vhost_virtqueue *vq, uint16_t num)
{
	vq->last_used_idx += num;
	if (vq->last_used_idx >= vq->size) {
		vq->used_wrap_counter ^= 1;
		vq->last_used_idx -= vq->size;
	}
}

static inline void
vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num)
{
	vq->last_avail_idx += num;
	if (vq->last_avail_idx >= vq->size) {
		vq->avail_wrap_counter ^= 1;
		vq->last_avail_idx -= vq->size;
	}
}
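/*
 * Packed ring indices wrap at vq->size rather than at 2^16; each wrap flips
 * the corresponding wrap counter, which is what desc_is_avail() above relies
 * on to distinguish a fresh descriptor from one left over from the previous
 * lap around the ring.
 */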

void __vhost_log_cache_write(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t addr, uint64_t len);
void __vhost_log_cache_write_iova(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t len);
void __vhost_log_cache_sync(struct virtio_net *dev,
		struct vhost_virtqueue *vq);
void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
			    uint64_t iova, uint64_t len);

static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_write(dev, addr, len);
}

static __rte_always_inline void
vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t addr, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_cache_write(dev, vq, addr, len);
}

static __rte_always_inline void
vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t offset, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
		if (unlikely(vq->log_guest_addr == 0))
			return;
		__vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset,
					len);
	}
}

static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
		     uint64_t offset, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
		if (unlikely(vq->log_guest_addr == 0))
			return;
		__vhost_log_write(dev, vq->log_guest_addr + offset, len);
	}
}

static __rte_always_inline void
vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
			   uint64_t iova, uint64_t len)
{
	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
		return;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		__vhost_log_cache_write_iova(dev, vq, iova, len);
	else
		__vhost_log_cache_write(dev, vq, iova, len);
}

static __rte_always_inline void
vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
			   uint64_t iova, uint64_t len)
{
	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
		return;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		__vhost_log_write_iova(dev, vq, iova, len);
	else
		__vhost_log_write(dev, iova, len);
}

extern int vhost_config_log_level;
extern int vhost_data_log_level;

#define VHOST_LOG_CONFIG(level, fmt, args...)			\
	rte_log(RTE_LOG_ ## level, vhost_config_log_level,	\
		"VHOST_CONFIG: " fmt, ##args)

#define VHOST_LOG_DATA(level, fmt, args...) \
	(void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ?	\
	 rte_log(RTE_LOG_ ## level,  vhost_data_log_level,	\
		"VHOST_DATA : " fmt, ##args) :			\
	 0)
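/*
 * Note: VHOST_LOG_DATA() compares the requested level against RTE_LOG_DP_LEVEL
 * with a compile-time constant condition, so data-path log calls above that
 * threshold are optimized away and cost nothing at runtime.
 */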

#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VHOST_MAX_PRINT_BUFF 6072
#define PRINT_PACKET(device, addr, size, header) do { \
	char *pkt_addr = (char *)(addr); \
	unsigned int index; \
	char packet[VHOST_MAX_PRINT_BUFF]; \
	\
	if ((header)) \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Header size %d: ", (device->vid), (size)); \
	else \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Packet size %d: ", (device->vid), (size)); \
	for (index = 0; index < (size); index++) { \
		snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
			"%02hhx ", pkt_addr[index]); \
	} \
	snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
	\
	VHOST_LOG_DATA(DEBUG, "%s", packet); \
} while (0)
#else
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif

#define MAX_VHOST_DEVICE	1024
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];

#define VHOST_BINARY_SEARCH_THRESH 256

static __rte_always_inline int guest_page_addrcmp(const void *p1,
						const void *p2)
{
	const struct guest_page *page1 = (const struct guest_page *)p1;
	const struct guest_page *page2 = (const struct guest_page *)p2;

	if (page1->guest_phys_addr > page2->guest_phys_addr)
		return 1;
	if (page1->guest_phys_addr < page2->guest_phys_addr)
		return -1;

	return 0;
}

static __rte_always_inline rte_iova_t
gpa_to_first_hpa(struct virtio_net *dev, uint64_t gpa,
	uint64_t gpa_size, uint64_t *hpa_size)
{
	uint32_t i;
	struct guest_page *page;
	struct guest_page key;

	*hpa_size = gpa_size;
	if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
		key.guest_phys_addr = gpa & ~(dev->guest_pages[0].size - 1);
		page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
			       sizeof(struct guest_page), guest_page_addrcmp);
		if (page) {
			if (gpa + gpa_size <=
					page->guest_phys_addr + page->size) {
				return gpa - page->guest_phys_addr +
					page->host_phys_addr;
			} else if (gpa < page->guest_phys_addr +
						page->size) {
				*hpa_size = page->guest_phys_addr +
					page->size - gpa;
				return gpa - page->guest_phys_addr +
					page->host_phys_addr;
			}
		}
	} else {
		for (i = 0; i < dev->nr_guest_pages; i++) {
			page = &dev->guest_pages[i];

			if (gpa >= page->guest_phys_addr) {
				if (gpa + gpa_size <=
					page->guest_phys_addr + page->size) {
					return gpa - page->guest_phys_addr +
						page->host_phys_addr;
				} else if (gpa < page->guest_phys_addr +
							page->size) {
					*hpa_size = page->guest_phys_addr +
						page->size - gpa;
					return gpa - page->guest_phys_addr +
						page->host_phys_addr;
				}
			}
		}
	}

	*hpa_size = 0;
	return 0;
}
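/*
 * gpa_to_first_hpa() returns the host physical address of the first
 * physically contiguous chunk backing [gpa, gpa + gpa_size): *hpa_size is set
 * to the number of contiguous bytes found (possibly less than gpa_size), or
 * to 0 when the guest physical address is not mapped at all. The page table
 * is scanned linearly below VHOST_BINARY_SEARCH_THRESH entries and with a
 * binary search above it.
 */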

/* Convert guest physical address to host physical address */
static __rte_always_inline rte_iova_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
	rte_iova_t hpa;
	uint64_t hpa_size;

	hpa = gpa_to_first_hpa(dev, gpa, size, &hpa_size);
	return hpa_size == size ? hpa : 0;
}

static __rte_always_inline uint64_t
hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
{
	struct rte_vhost_mem_region *r;
	uint32_t i;

	if (unlikely(!dev || !dev->mem))
		return 0;

	for (i = 0; i < dev->mem->nregions; i++) {
		r = &dev->mem->regions[i];

		if (vva >= r->host_user_addr &&
		    vva + len <  r->host_user_addr + r->size) {
			return r->guest_phys_addr + vva - r->host_user_addr;
		}
	}
	return 0;
}

static __rte_always_inline struct virtio_net *
get_device(int vid)
{
	struct virtio_net *dev = vhost_devices[vid];

	if (unlikely(!dev)) {
		VHOST_LOG_CONFIG(ERR,
			"(%d) device not found.\n", vid);
	}

	return dev;
}

int vhost_new_device(void);
void cleanup_device(struct virtio_net *dev, int destroy);
void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int);
void vhost_destroy_device_notify(struct virtio_net *dev);

void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
void cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq);
void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);

int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);

void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev);

void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
void vhost_setup_virtio_net(int vid, bool enable, bool legacy_ol_flags);
void vhost_enable_extbuf(int vid);
void vhost_enable_linearbuf(int vid);
int vhost_enable_guest_notification(struct virtio_net *dev,
		struct vhost_virtqueue *vq, int enable);

struct vhost_device_ops const *vhost_driver_callback_get(const char *path);

/*
 * Backend-specific cleanup.
 *
 * TODO: fix it; we have one backend now
 */
void vhost_backend_cleanup(struct virtio_net *dev);

uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t *len, uint8_t perm);
void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
			struct vhost_virtqueue *vq,
			uint64_t desc_addr, uint64_t desc_len);
int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t log_addr);
void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);

static __rte_always_inline uint64_t
vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t *len, uint8_t perm)
{
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		return rte_vhost_va_from_guest_pa(dev->mem, iova, len);

	return __vhost_iova_to_vva(dev, vq, iova, len, perm);
}
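/*
 * Without VIRTIO_F_IOMMU_PLATFORM negotiated, the "iova" passed in is in fact
 * a guest physical address and can be translated directly through the guest
 * memory table; with a vIOMMU in place the translation goes through the
 * per-virtqueue IOTLB cache instead, honoring the requested access permission.
 */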

#define vhost_avail_event(vr) \
	(*(volatile uint16_t*)&(vr)->used->ring[(vr)->size])
#define vhost_used_event(vr) \
	(*(volatile uint16_t*)&(vr)->avail->ring[(vr)->size])
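/*
 * With VIRTIO_RING_F_EVENT_IDX, the split ring places the avail_event field
 * right after the used ring and the used_event field right after the avail
 * ring, which is what the two accessors above dereference.
 */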

/*
 * The following is used with VIRTIO_RING_F_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have
 * just incremented index from old to new_idx, should we trigger an
 * event?
 */
static __rte_always_inline int
vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}
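/*
 * The unsigned arithmetic makes this wrap-safe: an event is needed exactly
 * when event_idx lies in the window [old, new_idx) modulo 2^16, i.e. when the
 * index just moved past the point the other side asked to be notified about.
 * For example, with old = 10 and new_idx = 12, event_idx 10 or 11 triggers an
 * event, while event_idx 12 does not (12 - 12 - 1 wraps to 0xffff).
 */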

static __rte_always_inline void
vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	/* Flush used->idx update before we read avail->flags. */
	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	/* Don't kick the guest if we haven't reached the index it asked for. */
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		uint16_t old = vq->signalled_used;
		uint16_t new = vq->last_used_idx;
		bool signalled_used_valid = vq->signalled_used_valid;

		vq->signalled_used = new;
		vq->signalled_used_valid = true;

		VHOST_LOG_DATA(DEBUG, "%s: used_event_idx=%d, old=%d, new=%d\n",
			__func__,
			vhost_used_event(vq),
			old, new);

		if ((vhost_need_event(vhost_used_event(vq), new, old) &&
					(vq->callfd >= 0)) ||
				unlikely(!signalled_used_valid)) {
			eventfd_write(vq->callfd, (eventfd_t) 1);
			if (dev->notify_ops->guest_notified)
				dev->notify_ops->guest_notified(dev->vid);
		}
	} else {
		/* Kick the guest if necessary. */
		if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
				&& (vq->callfd >= 0)) {
			eventfd_write(vq->callfd, (eventfd_t)1);
			if (dev->notify_ops->guest_notified)
				dev->notify_ops->guest_notified(dev->vid);
		}
	}
}

static __rte_always_inline void
vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint16_t old, new, off, off_wrap;
	bool signalled_used_valid, kick = false;

	/* Flush used desc update. */
	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
		if (vq->driver_event->flags !=
				VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	old = vq->signalled_used;
	new = vq->last_used_idx;
	vq->signalled_used = new;
	signalled_used_valid = vq->signalled_used_valid;
	vq->signalled_used_valid = true;

	if (vq->driver_event->flags != VRING_EVENT_F_DESC) {
		if (vq->driver_event->flags != VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	if (unlikely(!signalled_used_valid)) {
		kick = true;
		goto kick;
	}

	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

	off_wrap = vq->driver_event->off_wrap;
	off = off_wrap & ~(1 << 15);

	if (new <= old)
		old -= vq->size;

	if (vq->used_wrap_counter != off_wrap >> 15)
		off -= vq->size;

	if (vhost_need_event(off, new, old))
		kick = true;
kick:
	if (kick) {
		eventfd_write(vq->callfd, (eventfd_t)1);
		if (dev->notify_ops->guest_notified)
			dev->notify_ops->guest_notified(dev->vid);
	}
}

static __rte_always_inline void
free_ind_table(void *idesc)
{
	rte_free(idesc);
}

static __rte_always_inline void
restore_mbuf(struct rte_mbuf *m)
{
	uint32_t mbuf_size, priv_size;

	while (m) {
		priv_size = rte_pktmbuf_priv_size(m->pool);
		mbuf_size = sizeof(struct rte_mbuf) + priv_size;
		/* start of buffer is after mbuf structure and priv data */

		m->buf_addr = (char *)m + mbuf_size;
		m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
		m = m->next;
	}
}

static __rte_always_inline bool
mbuf_is_consumed(struct rte_mbuf *m)
{
	while (m) {
		if (rte_mbuf_refcnt_read(m) > 1)
			return false;
		m = m->next;
	}

	return true;
}

#endif /* _VHOST_NET_CDEV_H_ */