#include "rte_vhost.h"
#include "rte_vdpa.h"
-#include "rte_vdpa_dev.h"
+#include "vdpa_driver.h"
#include "rte_vhost_async.h"
#define MAX_PKT_BURST 32
-#define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)
-#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 4)
+#define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST)
+#define VHOST_MAX_ASYNC_VEC 2048
#define PACKED_DESC_ENQUEUE_USED_FLAG(w) \
	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
		VRING_DESC_F_WRITE)
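/*
 * Worked example of the flag values: with the used wrap counter set
 * (w == 1) the device writes VRING_DESC_F_AVAIL | VRING_DESC_F_USED |
 * VRING_DESC_F_WRITE, so both the AVAIL and USED bits match the current
 * wrap; with it cleared (w == 0) only VRING_DESC_F_WRITE remains, i.e.
 * both bits are cleared to match the flipped wrap state.
 */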
uint32_t count;
};
+/**
+ * Inflight async packet information.
+ */
+struct async_inflight_info {
+ struct rte_mbuf *mbuf;
+ uint16_t descs; /* num of descs inflight */
+ uint16_t nr_buffers; /* num of buffers inflight for packed ring */
+};
+
+struct vhost_async {
+ /* operation callbacks for DMA */
+ struct rte_vhost_async_channel_ops ops;
+
+	/* one iterator per packet of the current burst */
+	struct rte_vhost_iov_iter iov_iter[VHOST_MAX_ASYNC_IT];
+	/* iovec pool shared by the iterators of the burst */
+	struct rte_vhost_iovec iovec[VHOST_MAX_ASYNC_VEC];
+	uint16_t iter_idx;	/* next free iov_iter entry */
+	uint16_t iovec_idx;	/* next free iovec entry */
+
+ /* data transfer status */
+	struct async_inflight_info *pkts_info;	/* vq->size entries */
+	uint16_t pkts_idx;	/* next free slot in pkts_info */
+	uint16_t pkts_inflight_n;	/* submitted but not yet completed */
+	/* shadow used-ring entries kept until DMA completion */
+	union {
+		struct vring_used_elem *descs_split;
+		struct vring_used_elem_packed *buffers_packed;
+	};
+	/* producer index into the shadow entries */
+	union {
+		uint16_t desc_idx_split;
+		uint16_t buffer_idx_packed;
+	};
+	/* consumer index: next shadow entry to write back */
+	union {
+		uint16_t last_desc_idx_split;
+		uint16_t last_buffer_idx_packed;
+	};
+};
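/*
 * Illustrative sketch, not part of the patch: one way a registration
 * path could populate the new per-virtqueue context. The function name,
 * the NUMA-agnostic SOCKET_ID_ANY placement and the wholesale assignment
 * of the ops struct are assumptions for the example (needs rte_malloc.h).
 */
static int
vhost_async_alloc_sketch(struct vhost_virtqueue *vq,
		const struct rte_vhost_async_channel_ops *ops)
{
	struct vhost_async *async;

	async = rte_zmalloc_socket(NULL, sizeof(struct vhost_async),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (async == NULL)
		return -1;

	/* one in-flight slot per descriptor of the ring */
	async->pkts_info = rte_malloc_socket(NULL,
			vq->size * sizeof(struct async_inflight_info),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (async->pkts_info == NULL) {
		rte_free(async);
		return -1;
	}

	/* the shadow used-ring array (descs_split or buffers_packed)
	 * would be allocated here as well; omitted for brevity */
	async->ops = *ops;
	vq->async = async;

	return 0;
}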
+
/**
* Structure contains variables relevant to RX/TX virtqueues.
*/
struct rte_vhost_resubmit_info *resubmit_inflight;
uint64_t global_counter;
- /* operation callbacks for async dma */
- struct rte_vhost_async_channel_ops async_ops;
-
- struct rte_vhost_iov_iter *it_pool;
- struct iovec *vec_pool;
-
- /* async data transfer status */
- struct async_inflight_info *async_pkts_info;
- uint16_t async_pkts_idx;
- uint16_t async_pkts_inflight_n;
- uint16_t async_last_pkts_n;
- union {
- struct vring_used_elem *async_descs_split;
- struct vring_used_elem_packed *async_buffers_packed;
- };
- union {
- uint16_t async_desc_idx_split;
- uint16_t async_buffer_idx_packed;
- };
- union {
- uint16_t last_async_desc_idx_split;
- uint16_t last_async_buffer_idx_packed;
- };
-
- /* vq async features */
- bool async_registered;
- uint32_t async_threshold;
+ struct vhost_async *async;
int notif_enable;
#define VIRTIO_UNINITIALIZED_NOTIF (-1)
int16_t broadcast_rarp;
uint32_t nr_vring;
int async_copy;
+
int extbuf;
int linearbuf;
struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
uint16_t mtu;
uint8_t status;
- struct vhost_device_ops const *notify_ops;
+ struct rte_vhost_device_ops const *notify_ops;
uint32_t nr_guest_pages;
uint32_t max_guest_pages;
return 0;
}
+/*
+ * bsearch() comparator matching on address ranges: returns 0 when the
+ * key GPA (p1) falls within [guest_phys_addr, guest_phys_addr + size)
+ * of the array entry (p2), so pages of differing sizes all resolve.
+ */
+static __rte_always_inline int
+guest_page_rangecmp(const void *p1, const void *p2)
+{
+	const struct guest_page *page1 = (const struct guest_page *)p1;
+	const struct guest_page *page2 = (const struct guest_page *)p2;
+
+	if (page1->guest_phys_addr >= page2->guest_phys_addr) {
+		if (page1->guest_phys_addr < page2->guest_phys_addr + page2->size)
+			return 0;
+		else
+			return 1;
+	} else {
+		return -1;
+	}
+}
+
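/*
 * Why a range comparator: guest pages can have different sizes, while the
 * previous exact-match lookup aligned the key down by guest_pages[0].size
 * and could therefore build a key equal to no entry at all. Illustrative
 * layout (values invented for the example):
 *
 *   guest_pages[0] = { .guest_phys_addr = 0x0,      .size = 0x200000 }
 *   guest_pages[1] = { .guest_phys_addr = 0x200000, .size = 0x40000000 }
 *
 * For gpa = 0x1200000, masking with ~(0x200000 - 1) leaves 0x1200000,
 * which matches neither entry under an exact-address compare, so the
 * lookup missed even though guest_pages[1] contains the address. The
 * range comparator returns 0 since 0x200000 <= 0x1200000 < 0x40200000.
 */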
static __rte_always_inline rte_iova_t
gpa_to_first_hpa(struct virtio_net *dev, uint64_t gpa,
uint64_t gpa_size, uint64_t *hpa_size)
*hpa_size = gpa_size;
if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
- key.guest_phys_addr = gpa & ~(dev->guest_pages[0].size - 1);
+ key.guest_phys_addr = gpa;
page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
- sizeof(struct guest_page), guest_page_addrcmp);
+ sizeof(struct guest_page), guest_page_rangecmp);
if (page) {
if (gpa + gpa_size <=
page->guest_phys_addr + page->size) {
int vhost_enable_guest_notification(struct virtio_net *dev,
struct vhost_virtqueue *vq, int enable);
-struct vhost_device_ops const *vhost_driver_callback_get(const char *path);
+struct rte_vhost_device_ops const *vhost_driver_callback_get(const char *path);
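/*
 * Illustrative sketch, not part of the patch: an application registering
 * callbacks now spells out the renamed struct. The callback bodies and
 * socket path are assumptions for the example.
 */
static int new_device_cb(int vid) { (void)vid; return 0; }
static void destroy_device_cb(int vid) { (void)vid; }

static const struct rte_vhost_device_ops vhost_ops_sketch = {
	.new_device = new_device_cb,
	.destroy_device = destroy_device_cb,
};

/* rte_vhost_driver_callback_register("/tmp/vhost.sock", &vhost_ops_sketch); */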
/*
* Backend-specific cleanup.