vhost: replace vDPA device ID in Vhost
lib/librte_vhost/vhost.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#ifndef _VHOST_NET_CDEV_H_
#define _VHOST_NET_CDEV_H_
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <unistd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <sys/socket.h>
#include <linux/if.h>

#include <rte_log.h>
#include <rte_ether.h>
#include <rte_rwlock.h>
#include <rte_malloc.h>

#include "rte_vhost.h"
#include "rte_vdpa.h"

/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING 1
/* Used to indicate that the device is ready to operate */
#define VIRTIO_DEV_READY 2
/* Used to indicate that the built-in vhost net device backend is enabled */
#define VIRTIO_DEV_BUILTIN_VIRTIO_NET 4
/* Used to indicate that the device has its own data path and is configured */
#define VIRTIO_DEV_VDPA_CONFIGURED 8

/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1

#define BUF_VECTOR_MAX 256

#define VHOST_LOG_CACHE_NR 32

#define PACKED_DESC_ENQUEUE_USED_FLAG(w)	\
	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
		VRING_DESC_F_WRITE)
#define PACKED_DESC_DEQUEUE_USED_FLAG(w)	\
	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED) : 0x0)
#define PACKED_DESC_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | \
					 VRING_DESC_F_INDIRECT)

#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \
			    sizeof(struct vring_packed_desc))
#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)

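/*
 * Worked example (illustrative, assuming a 64-byte cache line):
 * sizeof(struct vring_packed_desc) is 16 bytes, so PACKED_BATCH_SIZE
 * evaluates to 64 / 16 = 4 descriptors per batch and PACKED_BATCH_MASK
 * to 3, letting the data path test batch alignment with
 * (idx & PACKED_BATCH_MASK) == 0.
 */
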
#ifdef VHOST_GCC_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("GCC unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifdef VHOST_CLANG_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifdef VHOST_ICC_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll (4)") \
	for (iter = val; iter < size; iter++)
#endif

#ifndef vhost_for_each_try_unroll
#define vhost_for_each_try_unroll(iter, val, num) \
	for (iter = val; iter < num; iter++)
#endif

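/*
 * Illustrative usage sketch (not part of the original file): the macro
 * is intended for fixed-size batch loops, where the per-compiler unroll
 * pragma lets the compiler fully unroll the body, e.g.:
 *
 *	uint16_t i;
 *	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
 *		flags[i] = descs[i].flags;
 *	}
 *
 * (flags and descs are hypothetical arrays used only for illustration.)
 */
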
/**
 * Structure that contains the buffer address, length and descriptor
 * index from the vring, used for scatter RX.
 */
struct buf_vector {
	uint64_t buf_iova;
	uint64_t buf_addr;
	uint32_t buf_len;
	uint32_t desc_idx;
};

/*
 * A structure to hold some fields needed in the zero copy code path,
 * mainly for associating an mbuf with the right desc_idx.
 */
struct zcopy_mbuf {
	struct rte_mbuf *mbuf;
	uint32_t desc_idx;
	uint16_t desc_count;
	uint16_t in_use;

	TAILQ_ENTRY(zcopy_mbuf) next;
};
TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);

/*
 * Structure contains the info for each batched memory copy.
 */
struct batch_copy_elem {
	void *dst;
	void *src;
	uint32_t len;
	uint64_t log_addr;
};

/*
 * Structure that contains the info for batched dirty logging.
 */
struct log_cache_entry {
	uint32_t offset;
	unsigned long val;
};

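/*
 * Illustrative note (an assumption drawn from how the cache is used by
 * __vhost_log_cache_write): 'offset' selects one unsigned-long-sized
 * word of the dirty bitmap and 'val' accumulates the dirty-page bits
 * for that word, so bursts of writes to neighbouring pages update this
 * local cache instead of performing an atomic OR on the shared log for
 * every page.
 */
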
struct vring_used_elem_packed {
	uint16_t id;
	uint16_t flags;
	uint32_t len;
	uint32_t count;
};

/**
 * Structure that contains variables relevant to RX/TX virtqueues.
 */
struct vhost_virtqueue {
	union {
		struct vring_desc	*desc;
		struct vring_packed_desc   *desc_packed;
	};
	union {
		struct vring_avail	*avail;
		struct vring_packed_desc_event *driver_event;
	};
	union {
		struct vring_used	*used;
		struct vring_packed_desc_event *device_event;
	};
	uint32_t		size;

	uint16_t		last_avail_idx;
	uint16_t		last_used_idx;
	/* Last used index we notified the front end of. */
	uint16_t		signalled_used;
	bool			signalled_used_valid;
#define VIRTIO_INVALID_EVENTFD		(-1)
#define VIRTIO_UNINITIALIZED_EVENTFD	(-2)

	/* Backend value to determine if the device should be started/stopped */
	int			backend;
	int			enabled;
	int			access_ok;
	rte_spinlock_t		access_lock;

	/* Used to notify the guest (trigger interrupt) */
	int			callfd;
	/* Currently unused as polling mode is enabled */
	int			kickfd;

	/* Physical address of the used ring, for logging */
	uint64_t		log_guest_addr;

	/* inflight shared memory info */
	union {
		struct rte_vhost_inflight_info_split *inflight_split;
		struct rte_vhost_inflight_info_packed *inflight_packed;
	};
	struct rte_vhost_resubmit_info *resubmit_inflight;
	uint64_t		global_counter;

	uint16_t		nr_zmbuf;
	uint16_t		zmbuf_size;
	uint16_t		last_zmbuf_idx;
	struct zcopy_mbuf	*zmbufs;
	struct zcopy_mbuf_list	zmbuf_list;

	union {
		struct vring_used_elem	*shadow_used_split;
		struct vring_used_elem_packed *shadow_used_packed;
	};
	uint16_t		shadow_used_idx;
	/* Record the latest cache-aligned enqueue descriptor index */
	uint16_t		shadow_aligned_idx;
	/* Record the first dequeue descriptor index of the packed ring */
	uint16_t		shadow_last_used_idx;
	struct vhost_vring_addr ring_addrs;

	struct batch_copy_elem	*batch_copy_elems;
	uint16_t		batch_copy_nb_elems;
	bool			used_wrap_counter;
	bool			avail_wrap_counter;

	struct log_cache_entry log_cache[VHOST_LOG_CACHE_NR];
	uint16_t log_cache_nb_elem;

	rte_rwlock_t	iotlb_lock;
	rte_rwlock_t	iotlb_pending_lock;
	struct rte_mempool *iotlb_pool;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
	int				iotlb_cache_nr;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
} __rte_cache_aligned;

/* Old kernels have no such macros defined */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
 #define VIRTIO_NET_F_GUEST_ANNOUNCE 21
#endif

#ifndef VIRTIO_NET_F_MQ
 #define VIRTIO_NET_F_MQ		22
#endif

#define VHOST_MAX_VRING			0x100
#define VHOST_MAX_QUEUE_PAIRS		0x80

#ifndef VIRTIO_NET_F_MTU
 #define VIRTIO_NET_F_MTU 3
#endif

#ifndef VIRTIO_F_ANY_LAYOUT
 #define VIRTIO_F_ANY_LAYOUT		27
#endif

/* Declare IOMMU related bits for older kernels */
#ifndef VIRTIO_F_IOMMU_PLATFORM

#define VIRTIO_F_IOMMU_PLATFORM 33

struct vhost_iotlb_msg {
	__u64 iova;
	__u64 size;
	__u64 uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
	__u8 perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
	__u8 type;
};

#define VHOST_IOTLB_MSG 0x1

struct vhost_msg {
	int type;
	union {
		struct vhost_iotlb_msg iotlb;
		__u8 padding[64];
	};
};
#endif

/*
 * Define virtio 1.0 for older kernels
 */
#ifndef VIRTIO_F_VERSION_1
 #define VIRTIO_F_VERSION_1 32
#endif

/* Declare packed ring related bits for older kernels */
#ifndef VIRTIO_F_RING_PACKED

#define VIRTIO_F_RING_PACKED 34

struct vring_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

struct vring_packed_desc_event {
	uint16_t off_wrap;
	uint16_t flags;
};
#endif

/*
 * Declare the packed ring defines below unconditionally,
 * as the kernel header might use different names.
 */
#define VRING_DESC_F_AVAIL	(1ULL << 7)
#define VRING_DESC_F_USED	(1ULL << 15)

#define VRING_EVENT_F_ENABLE 0x0
#define VRING_EVENT_F_DISABLE 0x1
#define VRING_EVENT_F_DESC 0x2

/*
 * Available and used descriptors are in the same order
 */
#ifndef VIRTIO_F_IN_ORDER
#define VIRTIO_F_IN_ORDER      35
#endif

/* Features supported by this builtin vhost-user net driver. */
#define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
				(1ULL << VIRTIO_F_ANY_LAYOUT) | \
				(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
				(1ULL << VIRTIO_NET_F_CTRL_RX) | \
				(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
				(1ULL << VIRTIO_NET_F_MQ)      | \
				(1ULL << VIRTIO_F_VERSION_1)   | \
				(1ULL << VHOST_F_LOG_ALL)      | \
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
				(1ULL << VIRTIO_NET_F_GSO) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO4) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO6) | \
				(1ULL << VIRTIO_NET_F_HOST_UFO) | \
				(1ULL << VIRTIO_NET_F_HOST_ECN) | \
				(1ULL << VIRTIO_NET_F_CSUM)    | \
				(1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_UFO) | \
				(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
				(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
				(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
				(1ULL << VIRTIO_NET_F_MTU)  | \
				(1ULL << VIRTIO_F_IN_ORDER) | \
				(1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
				(1ULL << VIRTIO_F_RING_PACKED))

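/*
 * Illustrative usage (hypothetical snippet, not in the original file):
 * negotiated features are tested bit by bit, e.g.
 *
 *	if (dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
 *		; // mergeable RX buffers were negotiated
 *
 * VIRTIO_NET_SUPPORTED_FEATURES is the mask this builtin net backend
 * offers before negotiation trims it down.
 */
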
struct guest_page {
	uint64_t guest_phys_addr;
	uint64_t host_phys_addr;
	uint64_t size;
};

struct inflight_mem_info {
	int		fd;
	void		*addr;
	uint64_t	size;
};

/**
 * Device structure that contains all configuration information
 * relating to the device.
 */
struct virtio_net {
	/* Frontend (QEMU) memory and memory region information */
	struct rte_vhost_memory	*mem;
	uint64_t		features;
	uint64_t		protocol_features;
	int			vid;
	uint32_t		flags;
	uint16_t		vhost_hlen;
	/* to tell if we need to broadcast a RARP packet */
	int16_t			broadcast_rarp;
	uint32_t		nr_vring;
	int			dequeue_zero_copy;
	int			extbuf;
	int			linearbuf;
	struct vhost_virtqueue	*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
	struct inflight_mem_info *inflight_info;
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
	char			ifname[IF_NAME_SZ];
	uint64_t		log_size;
	uint64_t		log_base;
	uint64_t		log_addr;
	struct rte_ether_addr	mac;
	uint16_t		mtu;

	struct vhost_device_ops const *notify_ops;

	uint32_t		nr_guest_pages;
	uint32_t		max_guest_pages;
	struct guest_page	*guest_pages;

	int			slave_req_fd;
	rte_spinlock_t		slave_req_lock;

	int			postcopy_ufd;
	int			postcopy_listening;

	struct rte_vdpa_device *vdpa_dev;

	/* context data for the external message handlers */
	void			*extern_data;
	/* pre and post vhost user message handlers for the device */
	struct rte_vhost_user_extern_ops extern_ops;
} __rte_cache_aligned;

static __rte_always_inline bool
vq_is_packed(struct virtio_net *dev)
{
	return dev->features & (1ull << VIRTIO_F_RING_PACKED);
}

static inline bool
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_DESC_F_USED);
}

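/*
 * Worked example (illustrative): with wrap_counter == 1 a descriptor is
 * available when F_AVAIL is set and F_USED is clear; once the ring
 * wraps (wrap_counter == 0) the same state is encoded with both bits
 * inverted. The AVAIL and USED bits must therefore disagree, relative
 * to the wrap counter, for the slot to be consumable.
 */
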
static inline void
vq_inc_last_used_packed(struct vhost_virtqueue *vq, uint16_t num)
{
	vq->last_used_idx += num;
	if (vq->last_used_idx >= vq->size) {
		vq->used_wrap_counter ^= 1;
		vq->last_used_idx -= vq->size;
	}
}

static inline void
vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num)
{
	vq->last_avail_idx += num;
	if (vq->last_avail_idx >= vq->size) {
		vq->avail_wrap_counter ^= 1;
		vq->last_avail_idx -= vq->size;
	}
}

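/*
 * Worked example (illustrative): with vq->size == 256,
 * last_avail_idx == 254 and num == 4, the index reaches 258, so the
 * wrap counter flips and last_avail_idx becomes 258 - 256 = 2.
 */
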
void __vhost_log_cache_write(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t addr, uint64_t len);
void __vhost_log_cache_write_iova(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t len);
void __vhost_log_cache_sync(struct virtio_net *dev,
		struct vhost_virtqueue *vq);
void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
			    uint64_t iova, uint64_t len);

static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_write(dev, addr, len);
}

static __rte_always_inline void
vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t addr, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_cache_write(dev, vq, addr, len);
}

static __rte_always_inline void
vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t offset, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
		if (unlikely(vq->log_guest_addr == 0))
			return;
		__vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset,
					len);
	}
}

static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
		     uint64_t offset, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
		if (unlikely(vq->log_guest_addr == 0))
			return;
		__vhost_log_write(dev, vq->log_guest_addr + offset, len);
	}
}

static __rte_always_inline void
vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
			   uint64_t iova, uint64_t len)
{
	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
		return;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		__vhost_log_cache_write_iova(dev, vq, iova, len);
	else
		__vhost_log_cache_write(dev, vq, iova, len);
}

static __rte_always_inline void
vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
			   uint64_t iova, uint64_t len)
{
	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
		return;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		__vhost_log_write_iova(dev, vq, iova, len);
	else
		__vhost_log_write(dev, iova, len);
}

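/*
 * Illustrative note (an assumption; the page size constant lives in
 * vhost.c, not in this header): VHOST_F_LOG_ALL is negotiated during
 * live migration, and each logged write marks one bit per 4 KiB
 * guest-physical page in the bitmap at dev->log_base, conceptually:
 *
 *	page = addr / 4096;
 *	log[page / 8] |= 1 << (page % 8);
 *
 * The *_cache_* variants batch these bit flips in vq->log_cache and
 * flush them with vhost_log_cache_sync().
 */
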
extern int vhost_config_log_level;
extern int vhost_data_log_level;

#define VHOST_LOG_CONFIG(level, fmt, args...)			\
	rte_log(RTE_LOG_ ## level, vhost_config_log_level,	\
		"VHOST_CONFIG: " fmt, ##args)

#define VHOST_LOG_DATA(level, fmt, args...) \
	(void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ?	\
	 rte_log(RTE_LOG_ ## level,  vhost_data_log_level,	\
		"VHOST_DATA : " fmt, ##args) :			\
	 0)

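/*
 * Example usage (illustrative):
 *
 *	VHOST_LOG_CONFIG(INFO, "(%d) negotiated features 0x%" PRIx64 "\n",
 *		dev->vid, dev->features);
 *
 * Unlike VHOST_LOG_CONFIG, VHOST_LOG_DATA is compiled out when the
 * level is above RTE_LOG_DP_LEVEL, so hot-path logging costs nothing
 * in release builds.
 */
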
#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VHOST_MAX_PRINT_BUFF 6072
#define PRINT_PACKET(device, addr, size, header) do { \
	char *pkt_addr = (char *)(addr); \
	unsigned int index; \
	char packet[VHOST_MAX_PRINT_BUFF]; \
	\
	if ((header)) \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Header size %d: ", (device->vid), (size)); \
	else \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Packet size %d: ", (device->vid), (size)); \
	for (index = 0; index < (size); index++) { \
		snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
			"%02hhx ", pkt_addr[index]); \
	} \
	snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
	\
	VHOST_LOG_DATA(DEBUG, "%s", packet); \
} while (0)
#else
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif

#define MAX_VHOST_DEVICE	1024
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];

#define VHOST_BINARY_SEARCH_THRESH 256

static __rte_always_inline int guest_page_addrcmp(const void *p1,
						const void *p2)
{
	const struct guest_page *page1 = (const struct guest_page *)p1;
	const struct guest_page *page2 = (const struct guest_page *)p2;

	if (page1->guest_phys_addr > page2->guest_phys_addr)
		return 1;
	if (page1->guest_phys_addr < page2->guest_phys_addr)
		return -1;

	return 0;
}

/* Convert guest physical address to host physical address */
static __rte_always_inline rte_iova_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
	uint32_t i;
	struct guest_page *page;
	struct guest_page key;

	if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
		key.guest_phys_addr = gpa;
		page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
			       sizeof(struct guest_page), guest_page_addrcmp);
		if (page) {
			if (gpa + size < page->guest_phys_addr + page->size)
				return gpa - page->guest_phys_addr +
					page->host_phys_addr;
		}
	} else {
		for (i = 0; i < dev->nr_guest_pages; i++) {
			page = &dev->guest_pages[i];

			if (gpa >= page->guest_phys_addr &&
			    gpa + size < page->guest_phys_addr +
			    page->size)
				return gpa - page->guest_phys_addr +
				       page->host_phys_addr;
		}
	}

	return 0;
}

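/*
 * Illustrative usage (hypothetical caller): a return value of 0 means
 * the range is not covered by one contiguous host mapping, e.g.
 *
 *	rte_iova_t hpa = gpa_to_hpa(dev, desc_addr, desc_len);
 *	if (hpa == 0)
 *		; // fall back to copying instead of zero-copy
 */
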
static __rte_always_inline uint64_t
hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
{
	struct rte_vhost_mem_region *r;
	uint32_t i;

	if (unlikely(!dev || !dev->mem))
		return 0;

	for (i = 0; i < dev->mem->nregions; i++) {
		r = &dev->mem->regions[i];

		if (vva >= r->host_user_addr &&
		    vva + len < r->host_user_addr + r->size) {
			return r->guest_phys_addr + vva - r->host_user_addr;
		}
	}
	return 0;
}

static __rte_always_inline struct virtio_net *
get_device(int vid)
{
	struct virtio_net *dev = vhost_devices[vid];

	if (unlikely(!dev)) {
		VHOST_LOG_CONFIG(ERR,
			"(%d) device not found.\n", vid);
	}

	return dev;
}

int vhost_new_device(void);
void cleanup_device(struct virtio_net *dev, int destroy);
void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int vid);
void vhost_destroy_device_notify(struct virtio_net *dev);

void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
void cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq);
void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);

int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);

void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev);

void vhost_set_ifname(int vid, const char *if_name, unsigned int if_len);
void vhost_enable_dequeue_zero_copy(int vid);
void vhost_set_builtin_virtio_net(int vid, bool enable);
void vhost_enable_extbuf(int vid);
void vhost_enable_linearbuf(int vid);

struct vhost_device_ops const *vhost_driver_callback_get(const char *path);

/*
 * Backend-specific cleanup.
 *
 * TODO: fix it; we only have one backend now
 */
void vhost_backend_cleanup(struct virtio_net *dev);

uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t *len, uint8_t perm);
void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
			struct vhost_virtqueue *vq,
			uint64_t desc_addr, uint64_t desc_len);
int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t log_addr);
void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);

static __rte_always_inline uint64_t
vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t *len, uint8_t perm)
{
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		return rte_vhost_va_from_guest_pa(dev->mem, iova, len);

	return __vhost_iova_to_vva(dev, vq, iova, len, perm);
}

#define vhost_avail_event(vr) \
	(*(volatile uint16_t *)&(vr)->used->ring[(vr)->size])
#define vhost_used_event(vr) \
	(*(volatile uint16_t *)&(vr)->avail->ring[(vr)->size])

/*
 * The following is used with VIRTIO_RING_F_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have
 * just incremented the index from old to new_idx, should we trigger
 * an event?
 */
static __rte_always_inline int
vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

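/*
 * Worked example (illustrative): old = 5, new_idx = 8, event_idx = 6.
 * (uint16_t)(8 - 6 - 1) = 1 and (uint16_t)(8 - 5) = 3, so 1 < 3 and an
 * event is due: the other side asked to be notified once entry 6 had
 * been consumed, and the window (old, new_idx] covers it. The uint16_t
 * arithmetic keeps the comparison correct across index wrap-around.
 */
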
static __rte_always_inline void
vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	/* Flush used->idx update before we read avail->flags. */
	rte_smp_mb();

	/* Don't kick the guest if we haven't reached the index it specified. */
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		uint16_t old = vq->signalled_used;
		uint16_t new = vq->last_used_idx;
		bool signalled_used_valid = vq->signalled_used_valid;

		vq->signalled_used = new;
		vq->signalled_used_valid = true;

		VHOST_LOG_DATA(DEBUG, "%s: used_event_idx=%d, old=%d, new=%d\n",
			__func__,
			vhost_used_event(vq),
			old, new);

		if ((vhost_need_event(vhost_used_event(vq), new, old) &&
					(vq->callfd >= 0)) ||
				unlikely(!signalled_used_valid)) {
			eventfd_write(vq->callfd, (eventfd_t) 1);
			if (dev->notify_ops->guest_notified)
				dev->notify_ops->guest_notified(dev->vid);
		}
	} else {
		/* Kick the guest if necessary. */
		if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
				&& (vq->callfd >= 0)) {
			eventfd_write(vq->callfd, (eventfd_t)1);
			if (dev->notify_ops->guest_notified)
				dev->notify_ops->guest_notified(dev->vid);
		}
	}
}

static __rte_always_inline void
vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint16_t old, new, off, off_wrap;
	bool signalled_used_valid, kick = false;

	/* Flush used desc update. */
	rte_smp_mb();

	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
		if (vq->driver_event->flags !=
				VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	old = vq->signalled_used;
	new = vq->last_used_idx;
	vq->signalled_used = new;
	signalled_used_valid = vq->signalled_used_valid;
	vq->signalled_used_valid = true;

	if (vq->driver_event->flags != VRING_EVENT_F_DESC) {
		if (vq->driver_event->flags != VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	if (unlikely(!signalled_used_valid)) {
		kick = true;
		goto kick;
	}

	rte_smp_rmb();

	off_wrap = vq->driver_event->off_wrap;
	off = off_wrap & ~(1 << 15);

	if (new <= old)
		old -= vq->size;

	if (vq->used_wrap_counter != off_wrap >> 15)
		off -= vq->size;

	if (vhost_need_event(off, new, old))
		kick = true;
kick:
	if (kick) {
		eventfd_write(vq->callfd, (eventfd_t)1);
		if (dev->notify_ops->guest_notified)
			dev->notify_ops->guest_notified(dev->vid);
	}
}

static __rte_always_inline void
free_ind_table(void *idesc)
{
	rte_free(idesc);
}

static __rte_always_inline void
restore_mbuf(struct rte_mbuf *m)
{
	uint32_t mbuf_size, priv_size;

	while (m) {
		priv_size = rte_pktmbuf_priv_size(m->pool);
		mbuf_size = sizeof(struct rte_mbuf) + priv_size;
		/* start of buffer is after mbuf structure and priv data */
		m->buf_addr = (char *)m + mbuf_size;
		m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
		m = m->next;
	}
}

static __rte_always_inline bool
mbuf_is_consumed(struct rte_mbuf *m)
{
	while (m) {
		if (rte_mbuf_refcnt_read(m) > 1)
			return false;
		m = m->next;
	}

	return true;
}

static __rte_always_inline void
put_zmbuf(struct zcopy_mbuf *zmbuf)
{
	zmbuf->in_use = 0;
}

#endif /* _VHOST_NET_CDEV_H_ */