#include "rte_vhost.h"
#include "rte_vdpa.h"
+#include "rte_vdpa_dev.h"
+
+#include "rte_vhost_async.h"
/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING 1
#define VIRTIO_DEV_BUILTIN_VIRTIO_NET 4
/* Used to indicate that the device has its own data path and is configured */
#define VIRTIO_DEV_VDPA_CONFIGURED 8
+/* Used to indicate that the feature negotiation failed */
+#define VIRTIO_DEV_FEATURES_FAILED 16
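/*
 * The VIRTIO_DEV_* values above are bit flags kept in virtio_net.flags.
 * A minimal sketch of how a negotiation failure could be recorded and
 * later checked (the surrounding handlers are assumed, not shown in
 * this diff):
 *
 *	dev->flags |= VIRTIO_DEV_FEATURES_FAILED;
 *	dev->flags &= ~VIRTIO_DEV_RUNNING;
 *	...
 *	if (dev->flags & VIRTIO_DEV_FEATURES_FAILED)
 *		return -1;
 */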
/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1
#define VHOST_LOG_CACHE_NR 32
+#define MAX_PKT_BURST 32
+
+#define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)
+#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2)
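/*
 * Sizing note (an assumption, not stated in this patch): both pools
 * appear dimensioned at twice a single burst so that a second burst can
 * be staged while the previous one is still owned by the async DMA
 * channel.
 */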
+
#define PACKED_DESC_ENQUEUE_USED_FLAG(w) \
((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
VRING_DESC_F_WRITE)
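/*
 * Worked example: "w" is the used wrap counter of the packed ring. A
 * descriptor counts as used when its AVAIL and USED bits both match the
 * counter, hence:
 *
 *	PACKED_DESC_ENQUEUE_USED_FLAG(1) ==
 *		VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE
 *	PACKED_DESC_ENQUEUE_USED_FLAG(0) ==
 *		VRING_DESC_F_WRITE
 */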
uint32_t desc_idx;
};
-/*
- * A structure to hold some fields needed in zero copy code path,
- * mainly for associating an mbuf with the right desc_idx.
- */
-struct zcopy_mbuf {
- struct rte_mbuf *mbuf;
- uint32_t desc_idx;
- uint16_t desc_count;
- uint16_t in_use;
-
- TAILQ_ENTRY(zcopy_mbuf) next;
-};
-TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);
-
/*
* Structure contains the info for each batched memory copy.
*/
int backend;
int enabled;
int access_ok;
+ /* Virtqueue is fully initialized and safe to process */
+ int ready;
+ /* Guest notification state last requested by the frontend;
+  * stays at VIRTIO_UNINITIALIZED_NOTIF until first set.
+  */
+ int notif_enable;
+#define VIRTIO_UNINITIALIZED_NOTIF (-1)
+
rte_spinlock_t access_lock;
/* Used to notify the guest (trigger interrupt) */
struct rte_vhost_resubmit_info *resubmit_inflight;
uint64_t global_counter;
- uint16_t nr_zmbuf;
- uint16_t zmbuf_size;
- uint16_t last_zmbuf_idx;
- struct zcopy_mbuf *zmbufs;
- struct zcopy_mbuf_list zmbuf_list;
-
union {
struct vring_used_elem *shadow_used_split;
struct vring_used_elem_packed *shadow_used_packed;
TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
int iotlb_cache_nr;
TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
+ /* operation callbacks for async dma */
+ struct rte_vhost_async_channel_ops async_ops;
+
+ struct rte_vhost_iov_iter it_pool[VHOST_MAX_ASYNC_IT];
+ struct iovec vec_pool[VHOST_MAX_ASYNC_VEC];
+
+ /* async data transfer status */
+ uintptr_t **async_pkts_pending;
+ struct async_inflight_info *async_pkts_info;
+ uint16_t async_pkts_idx;
+ uint16_t async_pkts_inflight_n;
+ uint16_t async_last_pkts_n;
+
+ /* vq async features */
+ bool async_inorder;
+ bool async_registered;
+ uint16_t async_threshold;
} __rte_cache_aligned;
-/* Old kernels have no such macros defined */
-#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
- #define VIRTIO_NET_F_GUEST_ANNOUNCE 21
-#endif
-
-#ifndef VIRTIO_NET_F_MQ
- #define VIRTIO_NET_F_MQ 22
-#endif
-
-#define VHOST_MAX_VRING 0x100
-#define VHOST_MAX_QUEUE_PAIRS 0x80
-
-#ifndef VIRTIO_NET_F_MTU
- #define VIRTIO_NET_F_MTU 3
-#endif
-
-#ifndef VIRTIO_F_ANY_LAYOUT
- #define VIRTIO_F_ANY_LAYOUT 27
-#endif
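/*
 * A minimal sketch of how async_ops gets populated, assuming the
 * rte_vhost_async_channel_ops callbacks declared in rte_vhost_async.h
 * (transfer_data() submits copies to the DMA engine,
 * check_completed_copies() polls for finished ones):
 *
 *	static struct rte_vhost_async_channel_ops dma_ops = {
 *		.transfer_data = my_dma_transfer_data,
 *		.check_completed_copies = my_dma_check_completed,
 *	};
 *
 *	rte_vhost_async_channel_register(vid, queue_id, features, &dma_ops);
 *
 * my_dma_transfer_data() and my_dma_check_completed() stand in for an
 * application-provided DMA backend.
 */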
+/* Virtio device status as per Virtio specification */
+#define VIRTIO_DEVICE_STATUS_RESET 0x00
+#define VIRTIO_DEVICE_STATUS_ACK 0x01
+#define VIRTIO_DEVICE_STATUS_DRIVER 0x02
+#define VIRTIO_DEVICE_STATUS_DRIVER_OK 0x04
+#define VIRTIO_DEVICE_STATUS_FEATURES_OK 0x08
+#define VIRTIO_DEVICE_STATUS_DEV_NEED_RESET 0x40
+#define VIRTIO_DEVICE_STATUS_FAILED 0x80
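/*
 * Per the Virtio specification the driver sets these bits in order
 * (ACK -> DRIVER -> FEATURES_OK -> DRIVER_OK), so the backend can gate
 * the datapath on the handshake. A sketch, with start_device() standing
 * in for the real start-up path:
 *
 *	if (!(dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK))
 *		return;
 *	if (dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK)
 *		start_device(dev);
 */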
+
+#define VHOST_MAX_VRING 0x100
+#define VHOST_MAX_QUEUE_PAIRS 0x80
/* Declare IOMMU related bits for older kernels */
#ifndef VIRTIO_F_IOMMU_PLATFORM
/* to tell if we need to broadcast a RARP packet */
int16_t broadcast_rarp;
uint32_t nr_vring;
- int dequeue_zero_copy;
+ /* Enable async copies via a registered DMA channel */
+ int async_copy;
int extbuf;
int linearbuf;
struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
uint64_t log_addr;
struct rte_ether_addr mac;
uint16_t mtu;
+ /* Device status (a bitmask of the VIRTIO_DEVICE_STATUS_* values) */
+ uint8_t status;
struct vhost_device_ops const *notify_ops;
return 0;
}
+/* Translate the guest physical range [gpa, gpa + gpa_size) to the host
+ * physical address of its first physically contiguous chunk; *hpa_size
+ * is set to that chunk's length, or 0 when the range is not mapped.
+ */
+static __rte_always_inline rte_iova_t
+gpa_to_first_hpa(struct virtio_net *dev, uint64_t gpa,
+ uint64_t gpa_size, uint64_t *hpa_size)
+{
+ uint32_t i;
+ struct guest_page *page;
+ struct guest_page key;
+
+ *hpa_size = gpa_size;
+ if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
+  /* Align down to a page start to form the bsearch key */
+  key.guest_phys_addr = gpa & ~(dev->guest_pages[0].size - 1);
+ page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
+ sizeof(struct guest_page), guest_page_addrcmp);
+ if (page) {
+ if (gpa + gpa_size <=
+ page->guest_phys_addr + page->size) {
+ return gpa - page->guest_phys_addr +
+ page->host_phys_addr;
+ } else if (gpa < page->guest_phys_addr +
+ page->size) {
+ *hpa_size = page->guest_phys_addr +
+ page->size - gpa;
+ return gpa - page->guest_phys_addr +
+ page->host_phys_addr;
+ }
+ }
+ } else {
+ for (i = 0; i < dev->nr_guest_pages; i++) {
+ page = &dev->guest_pages[i];
+
+ if (gpa >= page->guest_phys_addr) {
+ if (gpa + gpa_size <=
+ page->guest_phys_addr + page->size) {
+ return gpa - page->guest_phys_addr +
+ page->host_phys_addr;
+ } else if (gpa < page->guest_phys_addr +
+ page->size) {
+ *hpa_size = page->guest_phys_addr +
+ page->size - gpa;
+ return gpa - page->guest_phys_addr +
+ page->host_phys_addr;
+ }
+ }
+ }
+ }
+
+ *hpa_size = 0;
+ return 0;
+}
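/*
 * Usage sketch for gpa_to_first_hpa(): a guest buffer may cross host
 * page boundaries, so callers walk the range one contiguous chunk at a
 * time, e.g. when filling an iovec for the async channel. add_iovec()
 * is a hypothetical stand-in for the real iovec-filling code:
 *
 *	uint64_t off = 0, chunk;
 *
 *	while (off < len) {
 *		rte_iova_t hpa = gpa_to_first_hpa(dev, gpa + off,
 *					len - off, &chunk);
 *		if (chunk == 0)
 *			break;		range not (fully) mapped
 *		add_iovec(vec, (void *)(uintptr_t)hpa, chunk);
 *		off += chunk;
 *	}
 */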
+
static __rte_always_inline uint64_t
hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
{
void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev);
void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
-void vhost_enable_dequeue_zero_copy(int vid);
void vhost_set_builtin_virtio_net(int vid, bool enable);
void vhost_enable_extbuf(int vid);
void vhost_enable_linearbuf(int vid);
+int vhost_enable_guest_notification(struct virtio_net *dev,
+ struct vhost_virtqueue *vq, int enable);
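/*
 * A plausible split between the public API and this helper, inferred
 * from the notif_enable/ready fields above (vhost_enable_notify() is a
 * hypothetical stand-in for the existing split/packed notify toggles):
 *
 *	vq->notif_enable = enable;
 *	if (!vq->ready)
 *		return 0;
 *	return vhost_enable_notify(dev, vq, enable);
 */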
struct vhost_device_ops const *vhost_driver_callback_get(const char *path);
/* Don't kick guest if we don't reach index specified by guest. */
if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
uint16_t old = vq->signalled_used;
- uint16_t new = vq->last_used_idx;
+ /* Async copies complete out of band, so the guest-visible used index
+  * (vq->used->idx) may lag last_used_idx; signal based on what the
+  * guest can actually observe.
+  */
+ uint16_t new = vq->async_pkts_inflight_n ?
+  vq->used->idx : vq->last_used_idx;
bool signalled_used_valid = vq->signalled_used_valid;
vq->signalled_used = new;
return true;
}
-static __rte_always_inline void
-put_zmbuf(struct zcopy_mbuf *zmbuf)
-{
- zmbuf->in_use = 0;
-}
-
#endif /* _VHOST_NET_CDEV_H_ */