vhost: fix field naming in guest page struct
[dpdk.git] / lib / vhost / vhost.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#ifndef _VHOST_NET_CDEV_H_
#define _VHOST_NET_CDEV_H_
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <unistd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <sys/socket.h>
#include <linux/if.h>

#include <rte_log.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_dmadev.h>

#include "rte_vhost.h"
#include "vdpa_driver.h"

#include "rte_vhost_async.h"

/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING ((uint32_t)1 << 0)
/* Used to indicate that the device is ready to operate */
#define VIRTIO_DEV_READY ((uint32_t)1 << 1)
/* Used to indicate that the built-in vhost net device backend is enabled */
#define VIRTIO_DEV_BUILTIN_VIRTIO_NET ((uint32_t)1 << 2)
/* Used to indicate that the device has its own data path and is configured */
#define VIRTIO_DEV_VDPA_CONFIGURED ((uint32_t)1 << 3)
/* Used to indicate that the feature negotiation failed */
#define VIRTIO_DEV_FEATURES_FAILED ((uint32_t)1 << 4)
/* Used to indicate that the virtio_net tx code should fill TX ol_flags */
#define VIRTIO_DEV_LEGACY_OL_FLAGS ((uint32_t)1 << 5)

/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1

#define BUF_VECTOR_MAX 256

#define VHOST_LOG_CACHE_NR 32

#define MAX_PKT_BURST 32

#define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST)
#define VHOST_MAX_ASYNC_VEC 2048
#define VIRTIO_MAX_RX_PKTLEN 9728U
#define VHOST_DMA_MAX_COPY_COMPLETE ((VIRTIO_MAX_RX_PKTLEN / RTE_MBUF_DEFAULT_DATAROOM) \
                * MAX_PKT_BURST)

#define PACKED_DESC_ENQUEUE_USED_FLAG(w)        \
        ((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
                VRING_DESC_F_WRITE)
#define PACKED_DESC_DEQUEUE_USED_FLAG(w)        \
        ((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED) : 0x0)
#define PACKED_DESC_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | \
                                         VRING_DESC_F_INDIRECT)

#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \
                            sizeof(struct vring_packed_desc))
#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)
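
/*
 * Illustrative arithmetic (assuming a 64-byte cache line): each
 * struct vring_packed_desc is 16 bytes, so PACKED_BATCH_SIZE is
 * 64 / 16 = 4 descriptors per batch and PACKED_BATCH_MASK is 0x3.
 */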

#ifdef VHOST_GCC_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("GCC unroll 4") \
        for (iter = val; iter < size; iter++)
#endif

#ifdef VHOST_CLANG_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \
        for (iter = val; iter < size; iter++)
#endif

#ifdef VHOST_ICC_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll (4)") \
        for (iter = val; iter < size; iter++)
#endif

#ifndef vhost_for_each_try_unroll
#define vhost_for_each_try_unroll(iter, val, num) \
        for (iter = val; iter < num; iter++)
#endif
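
/*
 * Usage sketch (illustrative): the data path loops over a descriptor
 * batch with this macro so that supporting compilers unroll the loop:
 *
 *      uint16_t i;
 *
 *      vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
 *              process(&vq->desc_packed[avail_idx + i]);
 *      }
 *
 * where process() is a hypothetical stand-in for per-descriptor work.
 */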

/**
 * Structure contains buffer address, length and descriptor index
 * from vring to do scatter RX.
 */
struct buf_vector {
        uint64_t buf_iova;
        uint64_t buf_addr;
        uint32_t buf_len;
        uint32_t desc_idx;
};

/*
 * Structure contains the info for each batched memory copy.
 */
struct batch_copy_elem {
        void *dst;
        void *src;
        uint32_t len;
        uint64_t log_addr;
};

/*
 * Structure that contains the info for batched dirty logging.
 */
struct log_cache_entry {
        uint32_t offset;
        unsigned long val;
};
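
/*
 * Sketch of the assumed usage: 'offset' indexes the dirty-page bitmap in
 * unsigned-long-sized words, and 'val' accumulates the dirty bits for
 * that word. One entry can thus batch up to 8 * sizeof(unsigned long)
 * page writes before being flushed to the log region with a single
 * atomic OR.
 */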

struct vring_used_elem_packed {
        uint16_t id;
        uint16_t flags;
        uint32_t len;
        uint32_t count;
};

/**
 * iovec
 */
struct vhost_iovec {
        void *src_addr;
        void *dst_addr;
        size_t len;
};

/**
 * iovec iterator
 */
struct vhost_iov_iter {
        /** pointer to the iovec array */
        struct vhost_iovec *iov;
        /** number of iovec in this iterator */
        unsigned long nr_segs;
};

struct async_dma_vchan_info {
        /* circular array to track if packet copy completes */
        bool **pkts_cmpl_flag_addr;

        /* max elements in 'pkts_cmpl_flag_addr' */
        uint16_t ring_size;
        /* ring index mask for 'pkts_cmpl_flag_addr' */
        uint16_t ring_mask;

        /**
         * DMA virtual channel lock. Although DMA virtual channels can be
         * bound to data plane threads, the vhost control plane thread
         * could call data plane functions too, thus causing DMA device
         * contention.
         *
         * For example, in the VM exit case, the vhost control plane
         * thread needs to clear in-flight packets before disabling a
         * vring, but another data plane thread could be enqueuing packets
         * to the same vring with the same DMA virtual channel. As dmadev
         * PMD functions are lock-free, the control plane and data plane
         * threads could operate on the same DMA virtual channel at the
         * same time.
         */
        rte_spinlock_t dma_lock;
};

struct async_dma_info {
        struct async_dma_vchan_info *vchans;
        /* number of registered virtual channels */
        uint16_t nr_vchans;
};

extern struct async_dma_info dma_copy_track[RTE_DMADEV_DEFAULT_MAX];

/**
 * inflight async packet information
 */
struct async_inflight_info {
        struct rte_mbuf *mbuf;
        uint16_t descs; /* num of descs inflight */
        uint16_t nr_buffers; /* num of buffers inflight for packed ring */
};

struct vhost_async {
        struct vhost_iov_iter iov_iter[VHOST_MAX_ASYNC_IT];
        struct vhost_iovec iovec[VHOST_MAX_ASYNC_VEC];
        uint16_t iter_idx;
        uint16_t iovec_idx;

        /* data transfer status */
        struct async_inflight_info *pkts_info;
        /**
         * Packet reorder array. "true" indicates that the DMA device
         * completed all copies for the packet.
         *
         * Note that this array could be written by multiple threads
         * simultaneously. For example, if thread0 and thread1 receive
         * packets from the NIC and enqueue them to vring0 and vring1
         * with their own DMA devices DMA0 and DMA1, it's possible for
         * thread0 to get completed copies belonging to vring1 from DMA0,
         * while thread0 is calling rte_vhost_poll_enqueue_completed()
         * for vring0 and thread1 is calling
         * rte_vhost_submit_enqueue_burst() for vring1. In this case,
         * vq->access_lock cannot protect pkts_cmpl_flag of vring1.
         *
         * However, since offloading is done on a per-packet basis, each
         * packet flag will only be written by one thread. And a
         * single-byte write is atomic, so no lock for pkts_cmpl_flag is
         * needed.
         */
        bool *pkts_cmpl_flag;
        uint16_t pkts_idx;
        uint16_t pkts_inflight_n;
        union {
                struct vring_used_elem  *descs_split;
                struct vring_used_elem_packed *buffers_packed;
        };
        union {
                uint16_t desc_idx_split;
                uint16_t buffer_idx_packed;
        };
        union {
                uint16_t last_desc_idx_split;
                uint16_t last_buffer_idx_packed;
        };
};

/**
 * Structure contains variables relevant to RX/TX virtqueues.
 */
struct vhost_virtqueue {
        union {
                struct vring_desc       *desc;
                struct vring_packed_desc   *desc_packed;
        };
        union {
                struct vring_avail      *avail;
                struct vring_packed_desc_event *driver_event;
        };
        union {
                struct vring_used       *used;
                struct vring_packed_desc_event *device_event;
        };
        uint16_t                size;

        uint16_t                last_avail_idx;
        uint16_t                last_used_idx;
        /* Last used index notified to the front end. */
        uint16_t                signalled_used;
        bool                    signalled_used_valid;
#define VIRTIO_INVALID_EVENTFD          (-1)
#define VIRTIO_UNINITIALIZED_EVENTFD    (-2)

        bool                    enabled;
        bool                    access_ok;
        bool                    ready;

        rte_spinlock_t          access_lock;

        union {
                struct vring_used_elem  *shadow_used_split;
                struct vring_used_elem_packed *shadow_used_packed;
        };
        uint16_t                shadow_used_idx;
        /* Latest cache-aligned descriptor index recorded for packed ring enqueue */
        uint16_t                shadow_aligned_idx;
        /* First descriptor index recorded for the latest packed ring dequeue */
        uint16_t                shadow_last_used_idx;

        uint16_t                batch_copy_nb_elems;
        struct batch_copy_elem  *batch_copy_elems;
        int                     numa_node;
        bool                    used_wrap_counter;
        bool                    avail_wrap_counter;

        uint16_t                log_cache_nb_elem;
        /* Physical address of used ring, for logging */
        uint64_t                log_guest_addr;
        struct log_cache_entry  *log_cache;

        rte_rwlock_t    iotlb_lock;
        rte_rwlock_t    iotlb_pending_lock;
        struct rte_mempool *iotlb_pool;
        TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
        TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
        int                             iotlb_cache_nr;

        /* Used to notify the guest (trigger interrupt) */
        int                     callfd;
        /* Currently unused as polling mode is enabled */
        int                     kickfd;

        /* inflight shared memory info */
        union {
                struct rte_vhost_inflight_info_split *inflight_split;
                struct rte_vhost_inflight_info_packed *inflight_packed;
        };
        struct rte_vhost_resubmit_info *resubmit_inflight;
        uint64_t                global_counter;

        struct vhost_async      *async;

        int                     notif_enable;
#define VIRTIO_UNINITIALIZED_NOTIF      (-1)

        struct vhost_vring_addr ring_addrs;
} __rte_cache_aligned;

/* Virtio device status as per Virtio specification */
#define VIRTIO_DEVICE_STATUS_RESET              0x00
#define VIRTIO_DEVICE_STATUS_ACK                0x01
#define VIRTIO_DEVICE_STATUS_DRIVER             0x02
#define VIRTIO_DEVICE_STATUS_DRIVER_OK          0x04
#define VIRTIO_DEVICE_STATUS_FEATURES_OK        0x08
#define VIRTIO_DEVICE_STATUS_DEV_NEED_RESET     0x40
#define VIRTIO_DEVICE_STATUS_FAILED             0x80

#define VHOST_MAX_VRING                 0x100
#define VHOST_MAX_QUEUE_PAIRS           0x80

/* Declare IOMMU related bits for older kernels */
#ifndef VIRTIO_F_IOMMU_PLATFORM

#define VIRTIO_F_IOMMU_PLATFORM 33

struct vhost_iotlb_msg {
        __u64 iova;
        __u64 size;
        __u64 uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
        __u8 perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
        __u8 type;
};

#define VHOST_IOTLB_MSG 0x1

struct vhost_msg {
        int type;
        union {
                struct vhost_iotlb_msg iotlb;
                __u8 padding[64];
        };
};
#endif

/*
 * Define virtio 1.0 for older kernels
 */
#ifndef VIRTIO_F_VERSION_1
 #define VIRTIO_F_VERSION_1 32
#endif

/* Declare packed ring related bits for older kernels */
#ifndef VIRTIO_F_RING_PACKED

#define VIRTIO_F_RING_PACKED 34

struct vring_packed_desc {
        uint64_t addr;
        uint32_t len;
        uint16_t id;
        uint16_t flags;
};

struct vring_packed_desc_event {
        uint16_t off_wrap;
        uint16_t flags;
};
#endif

/*
 * Declare the packed ring defines below unconditionally,
 * as the kernel header might use different names.
 */
#define VRING_DESC_F_AVAIL      (1ULL << 7)
#define VRING_DESC_F_USED       (1ULL << 15)

#define VRING_EVENT_F_ENABLE 0x0
#define VRING_EVENT_F_DISABLE 0x1
#define VRING_EVENT_F_DESC 0x2

/*
 * Available and used descs are in the same order
 */
#ifndef VIRTIO_F_IN_ORDER
#define VIRTIO_F_IN_ORDER      35
#endif

/* Features supported by this builtin vhost-user net driver. */
#define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
                                (1ULL << VIRTIO_F_ANY_LAYOUT) | \
                                (1ULL << VIRTIO_NET_F_CTRL_VQ) | \
                                (1ULL << VIRTIO_NET_F_CTRL_RX) | \
                                (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
                                (1ULL << VIRTIO_NET_F_MQ)      | \
                                (1ULL << VIRTIO_F_VERSION_1)   | \
                                (1ULL << VHOST_F_LOG_ALL)      | \
                                (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
                                (1ULL << VIRTIO_NET_F_GSO) | \
                                (1ULL << VIRTIO_NET_F_HOST_TSO4) | \
                                (1ULL << VIRTIO_NET_F_HOST_TSO6) | \
                                (1ULL << VIRTIO_NET_F_HOST_UFO) | \
                                (1ULL << VIRTIO_NET_F_HOST_ECN) | \
                                (1ULL << VIRTIO_NET_F_CSUM)    | \
                                (1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
                                (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
                                (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
                                (1ULL << VIRTIO_NET_F_GUEST_UFO) | \
                                (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
                                (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
                                (1ULL << VIRTIO_RING_F_EVENT_IDX) | \
                                (1ULL << VIRTIO_NET_F_MTU)  | \
                                (1ULL << VIRTIO_F_IN_ORDER) | \
                                (1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
                                (1ULL << VIRTIO_F_RING_PACKED))

struct guest_page {
        uint64_t guest_phys_addr;
        uint64_t host_iova;
        uint64_t size;
};

struct inflight_mem_info {
        int             fd;
        void            *addr;
        uint64_t        size;
};

/**
 * Device structure contains all configuration information relating
 * to the device.
 */
struct virtio_net {
        /* Frontend (QEMU) memory and memory region information */
        struct rte_vhost_memory *mem;
        uint64_t                features;
        uint64_t                protocol_features;
        int                     vid;
        uint32_t                flags;
        uint16_t                vhost_hlen;
        /* to tell if we need to broadcast a RARP packet */
        int16_t                 broadcast_rarp;
        uint32_t                nr_vring;
        int                     async_copy;

        int                     extbuf;
        int                     linearbuf;
        struct vhost_virtqueue  *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
        struct inflight_mem_info *inflight_info;
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
        char                    ifname[IF_NAME_SZ];
        uint64_t                log_size;
        uint64_t                log_base;
        uint64_t                log_addr;
        struct rte_ether_addr   mac;
        uint16_t                mtu;
        uint8_t                 status;

        struct rte_vhost_device_ops const *notify_ops;

        uint32_t                nr_guest_pages;
        uint32_t                max_guest_pages;
        struct guest_page       *guest_pages;

        int                     slave_req_fd;
        rte_spinlock_t          slave_req_lock;

        int                     postcopy_ufd;
        int                     postcopy_listening;

        struct rte_vdpa_device *vdpa_dev;

        /* context data for the external message handlers */
        void                    *extern_data;
        /* pre and post vhost user message handlers for the device */
        struct rte_vhost_user_extern_ops extern_ops;
} __rte_cache_aligned;

static __rte_always_inline bool
vq_is_packed(struct virtio_net *dev)
{
        return dev->features & (1ull << VIRTIO_F_RING_PACKED);
}

static inline bool
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
        uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

        return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
                wrap_counter != !!(flags & VRING_DESC_F_USED);
}
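
/*
 * Worked example (illustrative): with wrap_counter == 1, a descriptor
 * with AVAIL=1/USED=0 is available for processing; AVAIL=1/USED=1 means
 * it was already used in this round, and AVAIL=0 means it is stale from
 * the previous wrap.
 */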

static inline void
vq_inc_last_used_packed(struct vhost_virtqueue *vq, uint16_t num)
{
        vq->last_used_idx += num;
        if (vq->last_used_idx >= vq->size) {
                vq->used_wrap_counter ^= 1;
                vq->last_used_idx -= vq->size;
        }
}

static inline void
vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num)
{
        vq->last_avail_idx += num;
        if (vq->last_avail_idx >= vq->size) {
                vq->avail_wrap_counter ^= 1;
                vq->last_avail_idx -= vq->size;
        }
}
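
/*
 * Worked example (illustrative): with vq->size == 256 and
 * last_avail_idx == 254, consuming 4 descriptors gives 258 >= 256, so
 * last_avail_idx wraps to 2 and avail_wrap_counter flips. desc_is_avail()
 * then matches descriptors whose AVAIL flag equals the new wrap counter
 * value and whose USED flag differs from it.
 */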

void __vhost_log_cache_write(struct virtio_net *dev,
                struct vhost_virtqueue *vq,
                uint64_t addr, uint64_t len);
void __vhost_log_cache_write_iova(struct virtio_net *dev,
                struct vhost_virtqueue *vq,
                uint64_t iova, uint64_t len);
void __vhost_log_cache_sync(struct virtio_net *dev,
                struct vhost_virtqueue *vq);
void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
                            uint64_t iova, uint64_t len);

static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
        if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
                __vhost_log_write(dev, addr, len);
}

static __rte_always_inline void
vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
                __vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        uint64_t addr, uint64_t len)
{
        if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
                __vhost_log_cache_write(dev, vq, addr, len);
}

static __rte_always_inline void
vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        uint64_t offset, uint64_t len)
{
        if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
                if (unlikely(vq->log_guest_addr == 0))
                        return;
                __vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset,
                                        len);
        }
}

static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
                     uint64_t offset, uint64_t len)
{
        if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
                if (unlikely(vq->log_guest_addr == 0))
                        return;
                __vhost_log_write(dev, vq->log_guest_addr + offset, len);
        }
}

static __rte_always_inline void
vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
                           uint64_t iova, uint64_t len)
{
        if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
                return;

        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                __vhost_log_cache_write_iova(dev, vq, iova, len);
        else
                __vhost_log_cache_write(dev, vq, iova, len);
}

static __rte_always_inline void
vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
                           uint64_t iova, uint64_t len)
{
        if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
                return;

        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                __vhost_log_write_iova(dev, vq, iova, len);
        else
                __vhost_log_write(dev, iova, len);
}

extern int vhost_config_log_level;
extern int vhost_data_log_level;

#define VHOST_LOG_CONFIG(level, fmt, args...)                   \
        rte_log(RTE_LOG_ ## level, vhost_config_log_level,      \
                "VHOST_CONFIG: " fmt, ##args)

#define VHOST_LOG_DATA(level, fmt, args...) \
        (void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ?        \
         rte_log(RTE_LOG_ ## level,  vhost_data_log_level,      \
                "VHOST_DATA : " fmt, ##args) :                  \
         0)

#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VHOST_MAX_PRINT_BUFF 6072
#define PRINT_PACKET(device, addr, size, header) do { \
        char *pkt_addr = (char *)(addr); \
        unsigned int index; \
        char packet[VHOST_MAX_PRINT_BUFF]; \
        \
        if ((header)) \
                snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Header size %d: ", (device->vid), (size)); \
        else \
                snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Packet size %d: ", (device->vid), (size)); \
        for (index = 0; index < (size); index++) { \
                snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
                        "%02hhx ", pkt_addr[index]); \
        } \
        snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
        \
        VHOST_LOG_DATA(DEBUG, "%s", packet); \
} while (0)
#else
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif

extern struct virtio_net *vhost_devices[RTE_MAX_VHOST_DEVICE];

#define VHOST_BINARY_SEARCH_THRESH 256

static __rte_always_inline int guest_page_addrcmp(const void *p1,
                                                const void *p2)
{
        const struct guest_page *page1 = (const struct guest_page *)p1;
        const struct guest_page *page2 = (const struct guest_page *)p2;

        if (page1->guest_phys_addr > page2->guest_phys_addr)
                return 1;
        if (page1->guest_phys_addr < page2->guest_phys_addr)
                return -1;

        return 0;
}

static __rte_always_inline int guest_page_rangecmp(const void *p1, const void *p2)
{
        const struct guest_page *page1 = (const struct guest_page *)p1;
        const struct guest_page *page2 = (const struct guest_page *)p2;

        if (page1->guest_phys_addr >= page2->guest_phys_addr) {
                if (page1->guest_phys_addr < page2->guest_phys_addr + page2->size)
                        return 0;
                else
                        return 1;
        } else
                return -1;
}

static __rte_always_inline rte_iova_t
gpa_to_first_hpa(struct virtio_net *dev, uint64_t gpa,
        uint64_t gpa_size, uint64_t *hpa_size)
{
        uint32_t i;
        struct guest_page *page;
        struct guest_page key;

        *hpa_size = gpa_size;
        if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
                key.guest_phys_addr = gpa;
                page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
                               sizeof(struct guest_page), guest_page_rangecmp);
                if (page) {
                        if (gpa + gpa_size <=
                                        page->guest_phys_addr + page->size) {
                                return gpa - page->guest_phys_addr +
                                        page->host_iova;
                        } else if (gpa < page->guest_phys_addr +
                                                page->size) {
                                *hpa_size = page->guest_phys_addr +
                                        page->size - gpa;
                                return gpa - page->guest_phys_addr +
                                        page->host_iova;
                        }
                }
        } else {
                for (i = 0; i < dev->nr_guest_pages; i++) {
                        page = &dev->guest_pages[i];

                        if (gpa >= page->guest_phys_addr) {
                                if (gpa + gpa_size <=
                                        page->guest_phys_addr + page->size) {
                                        return gpa - page->guest_phys_addr +
                                                page->host_iova;
                                } else if (gpa < page->guest_phys_addr +
                                                        page->size) {
                                        *hpa_size = page->guest_phys_addr +
                                                page->size - gpa;
                                        return gpa - page->guest_phys_addr +
                                                page->host_iova;
                                }
                        }
                }
        }

        *hpa_size = 0;
        return 0;
}

/* Convert guest physical address to host physical address */
static __rte_always_inline rte_iova_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
        rte_iova_t hpa;
        uint64_t hpa_size;

        hpa = gpa_to_first_hpa(dev, gpa, size, &hpa_size);
        return hpa_size == size ? hpa : 0;
}
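
/*
 * Usage sketch (illustrative, hypothetical values): to obtain the IOVA
 * of a 2 KB guest buffer for a DMA copy, a caller might do:
 *
 *      uint64_t hpa_size;
 *      rte_iova_t iova = gpa_to_first_hpa(dev, gpa, 2048, &hpa_size);
 *
 * If the buffer crosses the end of a contiguous guest page region,
 * hpa_size reports only the contiguous length of the first chunk, while
 * gpa_to_hpa() returns 0 unless the whole range is contiguous.
 */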

static __rte_always_inline uint64_t
hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
{
        struct rte_vhost_mem_region *r;
        uint32_t i;

        if (unlikely(!dev || !dev->mem))
                return 0;

        for (i = 0; i < dev->mem->nregions; i++) {
                r = &dev->mem->regions[i];

                if (vva >= r->host_user_addr &&
                    vva + len < r->host_user_addr + r->size) {
                        return r->guest_phys_addr + vva - r->host_user_addr;
                }
        }
        return 0;
}

static __rte_always_inline struct virtio_net *
get_device(int vid)
{
        struct virtio_net *dev = vhost_devices[vid];

        if (unlikely(!dev)) {
                VHOST_LOG_CONFIG(ERR,
                        "(%d) device not found.\n", vid);
        }

        return dev;
}

int vhost_new_device(void);
void cleanup_device(struct virtio_net *dev, int destroy);
void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int);
void vhost_destroy_device_notify(struct virtio_net *dev);

void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
void cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq);
void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);

int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);

void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev);

void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
void vhost_setup_virtio_net(int vid, bool enable, bool legacy_ol_flags);
void vhost_enable_extbuf(int vid);
void vhost_enable_linearbuf(int vid);
int vhost_enable_guest_notification(struct virtio_net *dev,
                struct vhost_virtqueue *vq, int enable);

struct rte_vhost_device_ops const *vhost_driver_callback_get(const char *path);

/*
 * Backend-specific cleanup.
 *
 * TODO: fix it; we have one backend now
 */
void vhost_backend_cleanup(struct virtio_net *dev);

uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        uint64_t iova, uint64_t *len, uint8_t perm);
void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
                        struct vhost_virtqueue *vq,
                        uint64_t desc_addr, uint64_t desc_len);
int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
                uint64_t log_addr);
void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);

static __rte_always_inline uint64_t
vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        uint64_t iova, uint64_t *len, uint8_t perm)
{
        if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
                return rte_vhost_va_from_guest_pa(dev->mem, iova, len);

        return __vhost_iova_to_vva(dev, vq, iova, len, perm);
}

#define vhost_avail_event(vr) \
        (*(volatile uint16_t*)&(vr)->used->ring[(vr)->size])
#define vhost_used_event(vr) \
        (*(volatile uint16_t*)&(vr)->avail->ring[(vr)->size])

/*
 * The following is used with VIRTIO_RING_F_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have
 * just incremented index from old to new_idx, should we trigger an
 * event?
 */
static __rte_always_inline int
vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}
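
/*
 * Worked example (illustrative): with old == 10, new_idx == 15 and
 * event_idx == 12, (uint16_t)(15 - 12 - 1) == 2 is less than
 * (uint16_t)(15 - 10) == 5, so an event is needed: the index the guest
 * asked to be notified at (12) was crossed while moving from 10 to 15.
 * The uint16_t casts keep the comparison correct across wrap-around.
 */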

static __rte_always_inline void
vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        /* Flush used->idx update before we read avail->flags. */
        rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

        /* Don't kick guest if we don't reach index specified by guest. */
        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
                uint16_t old = vq->signalled_used;
                uint16_t new = vq->last_used_idx;
                bool signalled_used_valid = vq->signalled_used_valid;

                vq->signalled_used = new;
                vq->signalled_used_valid = true;

                VHOST_LOG_DATA(DEBUG, "%s: used_event_idx=%d, old=%d, new=%d\n",
                        __func__,
                        vhost_used_event(vq),
                        old, new);

                if ((vhost_need_event(vhost_used_event(vq), new, old) &&
                                        (vq->callfd >= 0)) ||
                                unlikely(!signalled_used_valid)) {
                        eventfd_write(vq->callfd, (eventfd_t) 1);
                        if (dev->notify_ops->guest_notified)
                                dev->notify_ops->guest_notified(dev->vid);
                }
        } else {
                /* Kick the guest if necessary. */
                if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
                                && (vq->callfd >= 0)) {
                        eventfd_write(vq->callfd, (eventfd_t)1);
                        if (dev->notify_ops->guest_notified)
                                dev->notify_ops->guest_notified(dev->vid);
                }
        }
}

static __rte_always_inline void
vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        uint16_t old, new, off, off_wrap;
        bool signalled_used_valid, kick = false;

        /* Flush used desc update. */
        rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

        if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
                if (vq->driver_event->flags !=
                                VRING_EVENT_F_DISABLE)
                        kick = true;
                goto kick;
        }

        old = vq->signalled_used;
        new = vq->last_used_idx;
        vq->signalled_used = new;
        signalled_used_valid = vq->signalled_used_valid;
        vq->signalled_used_valid = true;

        if (vq->driver_event->flags != VRING_EVENT_F_DESC) {
                if (vq->driver_event->flags != VRING_EVENT_F_DISABLE)
                        kick = true;
                goto kick;
        }

        if (unlikely(!signalled_used_valid)) {
                kick = true;
                goto kick;
        }

        rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

        off_wrap = vq->driver_event->off_wrap;
        off = off_wrap & ~(1 << 15);

        if (new <= old)
                old -= vq->size;

        if (vq->used_wrap_counter != off_wrap >> 15)
                off -= vq->size;

        if (vhost_need_event(off, new, old))
                kick = true;
kick:
        if (kick) {
                eventfd_write(vq->callfd, (eventfd_t)1);
                if (dev->notify_ops->guest_notified)
                        dev->notify_ops->guest_notified(dev->vid);
        }
}

static __rte_always_inline void
free_ind_table(void *idesc)
{
        rte_free(idesc);
}

static __rte_always_inline void
restore_mbuf(struct rte_mbuf *m)
{
        uint32_t mbuf_size, priv_size;

        while (m) {
                priv_size = rte_pktmbuf_priv_size(m->pool);
                mbuf_size = sizeof(struct rte_mbuf) + priv_size;
                /* start of buffer is after mbuf structure and priv data */

                m->buf_addr = (char *)m + mbuf_size;
                m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
                m = m->next;
        }
}

static __rte_always_inline bool
mbuf_is_consumed(struct rte_mbuf *m)
{
        while (m) {
                if (rte_mbuf_refcnt_read(m) > 1)
                        return false;
                m = m->next;
        }

        return true;
}

#endif /* _VHOST_NET_CDEV_H_ */