/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#ifndef _VHOST_NET_CDEV_H_
#define _VHOST_NET_CDEV_H_
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <unistd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <sys/socket.h>
#include <linux/if.h>

#include <rte_log.h>
#include <rte_ether.h>
#include <rte_rwlock.h>
#include <rte_malloc.h>

#include "rte_vhost.h"
#include "rte_vdpa.h"
#include "rte_vdpa_dev.h"

/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING 1
/* Used to indicate that the device is ready to operate */
#define VIRTIO_DEV_READY 2
/* Used to indicate that the built-in vhost net device backend is enabled */
#define VIRTIO_DEV_BUILTIN_VIRTIO_NET 4
/* Used to indicate that the device has its own data path and is configured */
#define VIRTIO_DEV_VDPA_CONFIGURED 8

/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1

#define BUF_VECTOR_MAX 256

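/*
 * Dirty page log writes are first batched per virtqueue in a small cache
 * of VHOST_LOG_CACHE_NR entries and made visible to the front end when
 * __vhost_log_cache_sync() flushes the cache.
 */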
#define VHOST_LOG_CACHE_NR 32

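/*
 * Flags carried by a used packed descriptor depend on the wrap counter w:
 * the device marks a descriptor as used by setting both VRING_DESC_F_AVAIL
 * and VRING_DESC_F_USED to the value of its used wrap counter.
 */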
#define PACKED_DESC_ENQUEUE_USED_FLAG(w)	\
	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
		VRING_DESC_F_WRITE)
#define PACKED_DESC_DEQUEUE_USED_FLAG(w)	\
	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED) : 0x0)
#define PACKED_DESC_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | \
					 VRING_DESC_F_INDIRECT)

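/*
 * Process descriptors a cache line at a time: with a typical 64-byte
 * cache line and 16-byte vring_packed_desc, PACKED_BATCH_SIZE is 4.
 */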
#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \
			    sizeof(struct vring_packed_desc))
#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)

#ifdef VHOST_GCC_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("GCC unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifdef VHOST_CLANG_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifdef VHOST_ICC_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll (4)") \
	for (iter = val; iter < size; iter++)
#endif

#ifndef vhost_for_each_try_unroll
#define vhost_for_each_try_unroll(iter, val, num) \
	for (iter = val; iter < num; iter++)
#endif

/**
 * Structure contains buffer address, length and descriptor index
 * from vring to do scatter RX.
 */
struct buf_vector {
	uint64_t buf_iova;
	uint64_t buf_addr;
	uint32_t buf_len;
	uint32_t desc_idx;
};

/*
 * A structure to hold some fields needed in the zero-copy code path,
 * mainly for associating an mbuf with the right desc_idx.
 */
struct zcopy_mbuf {
	struct rte_mbuf *mbuf;
	uint32_t desc_idx;
	uint16_t desc_count;
	uint16_t in_use;

	TAILQ_ENTRY(zcopy_mbuf) next;
};
TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);

/*
 * Structure contains the info for each batched memory copy.
 */
struct batch_copy_elem {
	void *dst;
	void *src;
	uint32_t len;
	uint64_t log_addr;
};

/*
 * Structure that contains the info for batched dirty logging.
 */
struct log_cache_entry {
	uint32_t offset;
	unsigned long val;
};

struct vring_used_elem_packed {
	uint16_t id;
	uint16_t flags;
	uint32_t len;
	uint32_t count;
};

/**
 * Structure contains variables relevant to RX/TX virtqueues.
 */
struct vhost_virtqueue {
	union {
		struct vring_desc	*desc;
		struct vring_packed_desc   *desc_packed;
	};
	union {
		struct vring_avail	*avail;
		struct vring_packed_desc_event *driver_event;
	};
	union {
		struct vring_used	*used;
		struct vring_packed_desc_event *device_event;
	};
	uint32_t		size;

	uint16_t		last_avail_idx;
	uint16_t		last_used_idx;
	/* Last used index notified to the front end. */
	uint16_t		signalled_used;
	bool			signalled_used_valid;
#define VIRTIO_INVALID_EVENTFD		(-1)
#define VIRTIO_UNINITIALIZED_EVENTFD	(-2)

	/* Backend value to determine if device should be started/stopped */
	int			backend;
	int			enabled;
	int			access_ok;
	rte_spinlock_t		access_lock;

	/* Used to notify the guest (trigger interrupt) */
	int			callfd;
	/* Currently unused as polling mode is enabled */
	int			kickfd;

	/* Physical address of used ring, for logging */
	uint64_t		log_guest_addr;

	/* Inflight shared memory info */
	union {
		struct rte_vhost_inflight_info_split *inflight_split;
		struct rte_vhost_inflight_info_packed *inflight_packed;
	};
	struct rte_vhost_resubmit_info *resubmit_inflight;
	uint64_t		global_counter;

	uint16_t		nr_zmbuf;
	uint16_t		zmbuf_size;
	uint16_t		last_zmbuf_idx;
	struct zcopy_mbuf	*zmbufs;
	struct zcopy_mbuf_list	zmbuf_list;

	union {
		struct vring_used_elem	*shadow_used_split;
		struct vring_used_elem_packed *shadow_used_packed;
	};
	uint16_t		shadow_used_idx;
	/* Latest cache-aligned descriptor index for packed ring enqueue */
	uint16_t		shadow_aligned_idx;
	/* First dequeued descriptor index recorded for the packed ring */
	uint16_t		shadow_last_used_idx;
	struct vhost_vring_addr ring_addrs;

	struct batch_copy_elem	*batch_copy_elems;
	uint16_t		batch_copy_nb_elems;
	bool			used_wrap_counter;
	bool			avail_wrap_counter;

	struct log_cache_entry log_cache[VHOST_LOG_CACHE_NR];
	uint16_t log_cache_nb_elem;

	rte_rwlock_t	iotlb_lock;
	rte_rwlock_t	iotlb_pending_lock;
	struct rte_mempool *iotlb_pool;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
	int				iotlb_cache_nr;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
} __rte_cache_aligned;

/* Old kernels have no such macros defined */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
 #define VIRTIO_NET_F_GUEST_ANNOUNCE 21
#endif

#ifndef VIRTIO_NET_F_MQ
 #define VIRTIO_NET_F_MQ		22
#endif

#define VHOST_MAX_VRING			0x100
#define VHOST_MAX_QUEUE_PAIRS		0x80

#ifndef VIRTIO_NET_F_MTU
 #define VIRTIO_NET_F_MTU 3
#endif

#ifndef VIRTIO_F_ANY_LAYOUT
 #define VIRTIO_F_ANY_LAYOUT		27
#endif

/* Declare IOMMU related bits for older kernels */
#ifndef VIRTIO_F_IOMMU_PLATFORM

#define VIRTIO_F_IOMMU_PLATFORM 33

struct vhost_iotlb_msg {
	__u64 iova;
	__u64 size;
	__u64 uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
	__u8 perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
	__u8 type;
};

#define VHOST_IOTLB_MSG 0x1

struct vhost_msg {
	int type;
	union {
		struct vhost_iotlb_msg iotlb;
		__u8 padding[64];
	};
};
#endif

/*
 * Define virtio 1.0 for older kernels
 */
#ifndef VIRTIO_F_VERSION_1
 #define VIRTIO_F_VERSION_1 32
#endif

/* Declare packed ring related bits for older kernels */
#ifndef VIRTIO_F_RING_PACKED

#define VIRTIO_F_RING_PACKED 34

struct vring_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

struct vring_packed_desc_event {
	uint16_t off_wrap;
	uint16_t flags;
};
#endif

/*
 * Declare the packed ring defines below unconditionally,
 * as the kernel header might use different names.
 */
#define VRING_DESC_F_AVAIL	(1ULL << 7)
#define VRING_DESC_F_USED	(1ULL << 15)

#define VRING_EVENT_F_ENABLE 0x0
#define VRING_EVENT_F_DISABLE 0x1
#define VRING_EVENT_F_DESC 0x2
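
/*
 * Packed ring event suppression: ENABLE means always notify, DISABLE
 * means never notify, and DESC means notify only once the descriptor
 * index carried in off_wrap has been processed.
 */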

/*
 * Available and used descs are in the same order
 */
#ifndef VIRTIO_F_IN_ORDER
#define VIRTIO_F_IN_ORDER      35
#endif

/* Features supported by this builtin vhost-user net driver. */
#define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
				(1ULL << VIRTIO_F_ANY_LAYOUT) | \
				(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
				(1ULL << VIRTIO_NET_F_CTRL_RX) | \
				(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
				(1ULL << VIRTIO_NET_F_MQ)      | \
				(1ULL << VIRTIO_F_VERSION_1)   | \
				(1ULL << VHOST_F_LOG_ALL)      | \
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
				(1ULL << VIRTIO_NET_F_GSO) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO4) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO6) | \
				(1ULL << VIRTIO_NET_F_HOST_UFO) | \
				(1ULL << VIRTIO_NET_F_HOST_ECN) | \
				(1ULL << VIRTIO_NET_F_CSUM)    | \
				(1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_UFO) | \
				(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
				(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
				(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
				(1ULL << VIRTIO_NET_F_MTU)  | \
				(1ULL << VIRTIO_F_IN_ORDER) | \
				(1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
				(1ULL << VIRTIO_F_RING_PACKED))


struct guest_page {
	uint64_t guest_phys_addr;
	uint64_t host_phys_addr;
	uint64_t size;
};

struct inflight_mem_info {
	int		fd;
	void		*addr;
	uint64_t	size;
};

/**
 * Device structure contains all configuration information relating
 * to the device.
 */
struct virtio_net {
	/* Frontend (QEMU) memory and memory region information */
	struct rte_vhost_memory *mem;
	uint64_t		features;
	uint64_t		protocol_features;
	int			vid;
	uint32_t		flags;
	uint16_t		vhost_hlen;
	/* To tell if we need to broadcast a RARP packet */
	int16_t			broadcast_rarp;
	uint32_t		nr_vring;
	int			dequeue_zero_copy;
	int			extbuf;
	int			linearbuf;
	struct vhost_virtqueue	*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
	struct inflight_mem_info *inflight_info;
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
	char			ifname[IF_NAME_SZ];
	uint64_t		log_size;
	uint64_t		log_base;
	uint64_t		log_addr;
	struct rte_ether_addr	mac;
	uint16_t		mtu;

	struct vhost_device_ops const *notify_ops;

	uint32_t		nr_guest_pages;
	uint32_t		max_guest_pages;
	struct guest_page	*guest_pages;

	int			slave_req_fd;
	rte_spinlock_t		slave_req_lock;

	int			postcopy_ufd;
	int			postcopy_listening;

	struct rte_vdpa_device *vdpa_dev;

	/* context data for the external message handlers */
	void			*extern_data;
	/* pre and post vhost user message handlers for the device */
	struct rte_vhost_user_extern_ops extern_ops;
} __rte_cache_aligned;

static __rte_always_inline bool
vq_is_packed(struct virtio_net *dev)
{
	return dev->features & (1ull << VIRTIO_F_RING_PACKED);
}

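/*
 * A packed descriptor is available when its F_AVAIL bit matches the
 * ring's wrap counter while F_USED does not; the acquire load pairs
 * with the front end's release store of the flags field.
 */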
static inline bool
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_DESC_F_USED);
}

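/*
 * Packed ring indexes do not run free: when an index moves past the
 * ring size it is folded back and the matching wrap counter is
 * toggled, which is what lets desc_is_avail() work across laps.
 */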
static inline void
vq_inc_last_used_packed(struct vhost_virtqueue *vq, uint16_t num)
{
	vq->last_used_idx += num;
	if (vq->last_used_idx >= vq->size) {
		vq->used_wrap_counter ^= 1;
		vq->last_used_idx -= vq->size;
	}
}

static inline void
vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num)
{
	vq->last_avail_idx += num;
	if (vq->last_avail_idx >= vq->size) {
		vq->avail_wrap_counter ^= 1;
		vq->last_avail_idx -= vq->size;
	}
}

void __vhost_log_cache_write(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t addr, uint64_t len);
void __vhost_log_cache_write_iova(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t len);
void __vhost_log_cache_sync(struct virtio_net *dev,
		struct vhost_virtqueue *vq);
void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
			    uint64_t iova, uint64_t len);

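/*
 * The inline wrappers below make dirty logging close to free when live
 * migration is not running: unless VHOST_F_LOG_ALL has been negotiated,
 * each call reduces to a single (unlikely) feature-bit test.
 */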
static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_write(dev, addr, len);
}

static __rte_always_inline void
vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t addr, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_cache_write(dev, vq, addr, len);
}

static __rte_always_inline void
vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t offset, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
		if (unlikely(vq->log_guest_addr == 0))
			return;
		__vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset,
					len);
	}
}

static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
		     uint64_t offset, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
		if (unlikely(vq->log_guest_addr == 0))
			return;
		__vhost_log_write(dev, vq->log_guest_addr + offset, len);
	}
}

static __rte_always_inline void
vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
			   uint64_t iova, uint64_t len)
{
	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
		return;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		__vhost_log_cache_write_iova(dev, vq, iova, len);
	else
		__vhost_log_cache_write(dev, vq, iova, len);
}

static __rte_always_inline void
vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
			   uint64_t iova, uint64_t len)
{
	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
		return;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		__vhost_log_write_iova(dev, vq, iova, len);
	else
		__vhost_log_write(dev, iova, len);
}

extern int vhost_config_log_level;
extern int vhost_data_log_level;

#define VHOST_LOG_CONFIG(level, fmt, args...)			\
	rte_log(RTE_LOG_ ## level, vhost_config_log_level,	\
		"VHOST_CONFIG: " fmt, ##args)

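/*
 * Data path logs are additionally gated on RTE_LOG_DP_LEVEL at compile
 * time, so messages above that level cost nothing in the fast path.
 */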
#define VHOST_LOG_DATA(level, fmt, args...) \
	(void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ?	\
	 rte_log(RTE_LOG_ ## level,  vhost_data_log_level,	\
		"VHOST_DATA : " fmt, ##args) :			\
	 0)

#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VHOST_MAX_PRINT_BUFF 6072
#define PRINT_PACKET(device, addr, size, header) do { \
	char *pkt_addr = (char *)(addr); \
	unsigned int index; \
	char packet[VHOST_MAX_PRINT_BUFF]; \
	\
	if ((header)) \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Header size %d: ", (device->vid), (size)); \
	else \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Packet size %d: ", (device->vid), (size)); \
	for (index = 0; index < (size); index++) { \
		snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
			"%02hhx ", pkt_addr[index]); \
	} \
	snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
	\
	VHOST_LOG_DATA(DEBUG, "%s", packet); \
} while (0)
#else
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif

#define MAX_VHOST_DEVICE	1024
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];

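/*
 * Guest page lookups switch from a linear scan to bsearch() once the
 * sorted guest_pages array grows past this threshold.
 */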
#define VHOST_BINARY_SEARCH_THRESH 256

static __rte_always_inline int guest_page_addrcmp(const void *p1,
						const void *p2)
{
	const struct guest_page *page1 = (const struct guest_page *)p1;
	const struct guest_page *page2 = (const struct guest_page *)p2;

	if (page1->guest_phys_addr > page2->guest_phys_addr)
		return 1;
	if (page1->guest_phys_addr < page2->guest_phys_addr)
		return -1;

	return 0;
}

/* Convert guest physical address to host physical address */
static __rte_always_inline rte_iova_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
	uint32_t i;
	struct guest_page *page;
	struct guest_page key;

	if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
		key.guest_phys_addr = gpa;
		page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
			       sizeof(struct guest_page), guest_page_addrcmp);
		if (page) {
			if (gpa + size < page->guest_phys_addr + page->size)
				return gpa - page->guest_phys_addr +
					page->host_phys_addr;
		}
	} else {
		for (i = 0; i < dev->nr_guest_pages; i++) {
			page = &dev->guest_pages[i];

			if (gpa >= page->guest_phys_addr &&
			    gpa + size < page->guest_phys_addr +
			    page->size)
				return gpa - page->guest_phys_addr +
				       page->host_phys_addr;
		}
	}

	return 0;
}

static __rte_always_inline uint64_t
hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
{
	struct rte_vhost_mem_region *r;
	uint32_t i;

	if (unlikely(!dev || !dev->mem))
		return 0;

	for (i = 0; i < dev->mem->nregions; i++) {
		r = &dev->mem->regions[i];

		if (vva >= r->host_user_addr &&
		    vva + len <  r->host_user_addr + r->size) {
			return r->guest_phys_addr + vva - r->host_user_addr;
		}
	}
	return 0;
}

static __rte_always_inline struct virtio_net *
get_device(int vid)
{
	struct virtio_net *dev = vhost_devices[vid];

	if (unlikely(!dev)) {
		VHOST_LOG_CONFIG(ERR,
			"(%d) device not found.\n", vid);
	}

	return dev;
}

int vhost_new_device(void);
void cleanup_device(struct virtio_net *dev, int destroy);
void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int);
void vhost_destroy_device_notify(struct virtio_net *dev);

void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
void cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq);
void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);

int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);

void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev);

void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
void vhost_enable_dequeue_zero_copy(int vid);
void vhost_set_builtin_virtio_net(int vid, bool enable);
void vhost_enable_extbuf(int vid);
void vhost_enable_linearbuf(int vid);

struct vhost_device_ops const *vhost_driver_callback_get(const char *path);

/*
 * Backend-specific cleanup.
 *
 * TODO: fix it; we have one backend now
 */
void vhost_backend_cleanup(struct virtio_net *dev);

uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t *len, uint8_t perm);
void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
			struct vhost_virtqueue *vq,
			uint64_t desc_addr, uint64_t desc_len);
int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t log_addr);
void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);

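/*
 * Without VIRTIO_F_IOMMU_PLATFORM the iova is a guest physical address
 * and is translated straight from the memory table; with an IOMMU it
 * goes through the IOTLB, which may send a miss request to the front end.
 */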
static __rte_always_inline uint64_t
vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t *len, uint8_t perm)
{
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		return rte_vhost_va_from_guest_pa(dev->mem, iova, len);

	return __vhost_iova_to_vva(dev, vq, iova, len, perm);
}

#define vhost_avail_event(vr) \
	(*(volatile uint16_t*)&(vr)->used->ring[(vr)->size])
#define vhost_used_event(vr) \
	(*(volatile uint16_t*)&(vr)->avail->ring[(vr)->size])
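
/*
 * With VIRTIO_RING_F_EVENT_IDX the avail event index lives in the slot
 * right after the used ring and the used event index in the slot right
 * after the avail ring, which is what these macros dereference.
 */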

/*
 * The following is used with VIRTIO_RING_F_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have
 * just incremented the index from old to new_idx, should we trigger an
 * event?
 */
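/*
 * Worked example (16-bit wrap-around arithmetic): with old = 5,
 * event_idx = 6 and new_idx = 8, (8 - 6 - 1) = 1 < (8 - 5) = 3, so the
 * front end asked to be signalled once entry 6 was consumed and we
 * must send the notification.
 */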
static __rte_always_inline int
vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

static __rte_always_inline void
vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	/* Flush used->idx update before we read avail->flags. */
	rte_smp_mb();

	/* Don't kick the guest if we haven't reached the index it specified. */
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		uint16_t old = vq->signalled_used;
		uint16_t new = vq->last_used_idx;
		bool signalled_used_valid = vq->signalled_used_valid;

		vq->signalled_used = new;
		vq->signalled_used_valid = true;

		VHOST_LOG_DATA(DEBUG, "%s: used_event_idx=%d, old=%d, new=%d\n",
			__func__,
			vhost_used_event(vq),
			old, new);

		if ((vhost_need_event(vhost_used_event(vq), new, old) &&
					(vq->callfd >= 0)) ||
				unlikely(!signalled_used_valid)) {
			eventfd_write(vq->callfd, (eventfd_t) 1);
			if (dev->notify_ops->guest_notified)
				dev->notify_ops->guest_notified(dev->vid);
		}
	} else {
		/* Kick the guest if necessary. */
		if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
				&& (vq->callfd >= 0)) {
			eventfd_write(vq->callfd, (eventfd_t)1);
			if (dev->notify_ops->guest_notified)
				dev->notify_ops->guest_notified(dev->vid);
		}
	}
}

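/*
 * Packed ring variant: driver_event->off_wrap packs the event offset in
 * bits 0-14 and the driver's wrap counter in bit 15; an offset from the
 * other wrap cycle is folded back by the ring size before the
 * vhost_need_event() window check.
 */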
static __rte_always_inline void
vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint16_t old, new, off, off_wrap;
	bool signalled_used_valid, kick = false;

	/* Flush used desc update. */
	rte_smp_mb();

	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
		if (vq->driver_event->flags !=
				VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	old = vq->signalled_used;
	new = vq->last_used_idx;
	vq->signalled_used = new;
	signalled_used_valid = vq->signalled_used_valid;
	vq->signalled_used_valid = true;

	if (vq->driver_event->flags != VRING_EVENT_F_DESC) {
		if (vq->driver_event->flags != VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	if (unlikely(!signalled_used_valid)) {
		kick = true;
		goto kick;
	}

	rte_smp_rmb();

	off_wrap = vq->driver_event->off_wrap;
	off = off_wrap & ~(1 << 15);

	if (new <= old)
		old -= vq->size;

	if (vq->used_wrap_counter != off_wrap >> 15)
		off -= vq->size;

	if (vhost_need_event(off, new, old))
		kick = true;
kick:
	if (kick) {
		eventfd_write(vq->callfd, (eventfd_t)1);
		if (dev->notify_ops->guest_notified)
			dev->notify_ops->guest_notified(dev->vid);
	}
}

static __rte_always_inline void
free_ind_table(void *idesc)
{
	rte_free(idesc);
}

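/*
 * Zero-copy dequeue points an mbuf's buf_addr/buf_iova into guest
 * memory; restore_mbuf() resets them to the mbuf's own data room
 * (right after the mbuf structure and private area) before the mbuf
 * goes back to its pool.
 */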
static __rte_always_inline void
restore_mbuf(struct rte_mbuf *m)
{
	uint32_t mbuf_size, priv_size;

	while (m) {
		priv_size = rte_pktmbuf_priv_size(m->pool);
		mbuf_size = sizeof(struct rte_mbuf) + priv_size;
		/* start of buffer is after mbuf structure and priv data */

		m->buf_addr = (char *)m + mbuf_size;
		m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
		m = m->next;
	}
}

static __rte_always_inline bool
mbuf_is_consumed(struct rte_mbuf *m)
{
	while (m) {
		if (rte_mbuf_refcnt_read(m) > 1)
			return false;
		m = m->next;
	}

	return true;
}

static __rte_always_inline void
put_zmbuf(struct zcopy_mbuf *zmbuf)
{
	zmbuf->in_use = 0;
}

#endif /* _VHOST_NET_CDEV_H_ */